ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
5022c468-76d4-4d66-9465-6171e0aac452 | cpp | tensorflow/tensorflow | tf_op_registry | tensorflow/core/ir/tf_op_registry.cc | tensorflow/core/ir/tf_op_registry_test.cc | #include "tensorflow/core/ir/tf_op_registry.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/ir/interfaces.h"
#include "tensorflow/core/ir/ops.h"
namespace mlir {
namespace tfg {
TensorFlowOpRegistryInterface::TensorFlowOpRegistryInterface(Dialect *dialect)
: TensorFlowOpRegistryInterface(dialect, tensorflow::OpRegistry::Global()) {
}
static bool IsStatefulImpl(const tensorflow::OpRegistry *registry,
StringRef op_name) {
const tensorflow::OpRegistrationData *op_reg_data =
registry->LookUp(op_name.str());
if (!op_reg_data) return true;
return op_reg_data->op_def.is_stateful();
}
bool TensorFlowOpRegistryInterface::isStateful(Operation *op) const {
if (op->hasTrait<OpTrait::IntrinsicOperation>()) return false;
if (auto func = dyn_cast<GraphFuncOp>(op)) return func.getIsStateful();
StringRef op_name = op->getName().stripDialect();
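  // Region-based control-flow ops (e.g. "CaseRegion") are looked up in the
  // registry under their functional names (e.g. "Case"), so strip the suffix.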
if (op->getNumRegions() && op_name.ends_with("Region"))
op_name = op_name.drop_back(6);
return IsStatefulImpl(registry_, op_name);
}
}
} | #include "tensorflow/core/ir/tf_op_registry.h"
#include <string>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
void PrepareContext(MLIRContext *context) {
DialectRegistry registry;
registry.insert<TFGraphDialect>();
registry.addExtension(+[](mlir::MLIRContext *ctx, TFGraphDialect *dialect) {
dialect->addInterfaces<TensorFlowOpRegistryInterface>();
});
context->appendDialectRegistry(registry);
}
TEST(TensorFlowOpRegistryInterface, TestIntrinsicOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32>) -> (tensor<i32>) {
return(%arg) : tensor<i32>
}
)mlir";
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
auto func_op = cast<GraphFuncOp>(&module->front());
auto ret_op = cast<ReturnOp>(func_op.getBody().front().getTerminator());
EXPECT_FALSE(dyn_cast<TensorFlowRegistryInterface>(*func_op));
EXPECT_FALSE(dyn_cast<TensorFlowRegistryInterface>(*ret_op));
}
TEST(TensorFlowOpRegistryInterface, TestStatelessTFOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code = R"mlir(
tfg.func @test(%lhs: tensor<i32>, %rhs: tensor<i32>) -> (tensor<i32>) {
%Add, %ctl = Add(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> (tensor<i32>)
return(%Add) : tensor<i32>
}
)mlir";
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *add =
&cast<GraphFuncOp>(&module->front()).getBody().front().front();
auto iface = dyn_cast<TensorFlowRegistryInterface>(add);
ASSERT_TRUE(iface);
EXPECT_FALSE(iface.isStateful());
}
TEST(TensorFlowOpRegistryInterface, TestStatelessAndStatefulRegionOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code_template = R"mlir(
tfg.func @test(%idx: tensor<i32>, %arg: tensor<i32>) -> (tensor<i32>) {{
%Case, %ctl = {0}CaseRegion %idx {{
yield(%arg) : tensor<i32>
} : (tensor<i32>) -> (tensor<i32>)
return(%Case) : tensor<i32>
}
)mlir";
SmallVector<StringRef, 2> prefixes = {"", "Stateless"};
SmallVector<bool, 2> expected = {true, false};
for (auto it : llvm::zip(prefixes, expected)) {
std::string code = llvm::formatv(code_template, std::get<0>(it)).str();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *case_op =
&cast<GraphFuncOp>(&module->front()).getBody().front().front();
auto iface = dyn_cast<TensorFlowRegistryInterface>(case_op);
ASSERT_TRUE(iface);
EXPECT_EQ(iface.isStateful(), std::get<1>(it));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
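The `tf_op_registry` pair above wires TFG's MLIR ops to TensorFlow's global op registry. Below is a minimal standalone sketch of the registry lookup at the heart of `IsStatefulImpl`, assuming a TensorFlow build environment; the `IsOpStateful` helper name is hypothetical:

```cpp
#include <string>

#include "tensorflow/core/framework/op.h"

// Returns true for ops registered as stateful; unknown ops are conservatively
// treated as stateful, mirroring IsStatefulImpl above.
bool IsOpStateful(const std::string& op_name) {
  const tensorflow::OpRegistrationData* op_reg_data =
      tensorflow::OpRegistry::Global()->LookUp(op_name);
  if (op_reg_data == nullptr) return true;
  return op_reg_data->op_def.is_stateful();
}
```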
ade33ad4-1708-4f0a-a0ca-5ac3d17384c5 | cpp | google/tsl | cpu_utils | tsl/platform/profile_utils/cpu_utils.cc | tsl/platform/profile_utils/cpu_utils_test.cc | #include "tsl/platform/profile_utils/cpu_utils.h"
#include <fstream>
#include <limits>
#include <mutex>
#if defined(_WIN32)
#include <windows.h>
#endif
#if defined(__APPLE__)
#include <sys/sysctl.h>
#endif
#include "absl/base/call_once.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/profile_utils/android_armv7a_cpu_utils_helper.h"
namespace tsl {
namespace profile_utils {
constexpr int64_t CpuUtils::INVALID_FREQUENCY;
static ICpuUtilsHelper* cpu_utils_helper_instance_ = nullptr;
#if (defined(__powerpc__) || \
defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
(defined(__s390x__))
uint64 CpuUtils::GetCycleCounterFrequency() {
static const uint64 cpu_frequency = GetCycleCounterFrequencyImpl();
return cpu_frequency;
}
#else
int64_t CpuUtils::GetCycleCounterFrequency() {
static const int64_t cpu_frequency = GetCycleCounterFrequencyImpl();
return cpu_frequency;
}
#endif
double CpuUtils::GetMicroSecPerClock() {
static const double micro_sec_per_clock =
(1000.0 * 1000.0) / static_cast<double>(GetCycleCounterFrequency());
return micro_sec_per_clock;
}
void CpuUtils::ResetClockCycle() {
GetCpuUtilsHelperSingletonInstance().ResetClockCycle();
}
void CpuUtils::EnableClockCycleProfiling() {
GetCpuUtilsHelperSingletonInstance().EnableClockCycleProfiling();
}
void CpuUtils::DisableClockCycleProfiling() {
GetCpuUtilsHelperSingletonInstance().DisableClockCycleProfiling();
}
std::chrono::duration<double> CpuUtils::ConvertClockCycleToTime(
const int64_t clock_cycle) {
return std::chrono::duration<double>(static_cast<double>(clock_cycle) /
GetCycleCounterFrequency());
}
int64_t CpuUtils::GetCycleCounterFrequencyImpl() {
#if defined(__ANDROID__)
return GetCpuUtilsHelperSingletonInstance().CalculateCpuFrequency();
#elif defined(__linux__)
std::ifstream cpuinfo("/proc/cpuinfo");
if (!cpuinfo) {
LOG(WARNING) << "Failed to open /proc/cpuinfo";
return INVALID_FREQUENCY;
}
string line;
while (std::getline(cpuinfo, line)) {
double cpu_freq = 0.0;
int retval = 0;
double freq_factor = 2.0;
#if (defined(__powerpc__) || \
defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
retval = sscanf(line.c_str(), "clock : %lfMHz", &cpu_freq);
freq_factor = 1.0;
#elif defined(__s390x__)
retval = sscanf(line.c_str(), "bogomips per cpu: %lf", &cpu_freq);
#elif defined(__aarch64__)
retval = sscanf(line.c_str(), "BogoMIPS : %lf", &cpu_freq);
#else
retval = sscanf(line.c_str(), "bogomips : %lf", &cpu_freq);
#endif
if (retval > 0) {
const double freq_ghz = cpu_freq / 1000.0 / freq_factor;
if (retval != 1 || freq_ghz < 0.01) {
LOG(WARNING) << "Failed to get CPU frequency: " << freq_ghz << " GHz";
return INVALID_FREQUENCY;
}
const int64_t freq_n =
static_cast<int64_t>(freq_ghz * 1000.0 * 1000.0 * 1000.0);
VLOG(1) << "CPU Frequency: " << freq_n << " Hz";
return freq_n;
}
}
LOG(WARNING)
<< "Failed to find bogomips or clock in /proc/cpuinfo; cannot determine "
"CPU frequency";
return INVALID_FREQUENCY;
#elif defined(__APPLE__)
int64_t freq_hz = 0;
size_t freq_hz_size = sizeof(freq_hz);
int retval =
sysctlbyname("hw.cpufrequency_max", &freq_hz, &freq_hz_size, NULL, 0);
if (retval != 0 || freq_hz < 1e6) {
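    // hw.cpufrequency_max is unavailable on Apple Silicon; approximate the
    // frequency as the timebase frequency scaled by the kernel tick rate.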
int64_t tbfrequency = 0;
size_t tbfrequency_size = sizeof(tbfrequency);
retval = sysctlbyname("hw.tbfrequency", &tbfrequency, &tbfrequency_size,
NULL, 0);
if (retval == 0) {
clockinfo clock_info;
size_t clock_info_size = sizeof(clock_info);
retval = sysctlbyname("kern.clockrate", &clock_info, &clock_info_size,
NULL, 0);
if (retval == 0) {
freq_hz = clock_info.hz * tbfrequency;
}
}
if (retval != 0 || freq_hz < 1e6) {
LOG(WARNING) << "Failed to get CPU frequency: " << freq_hz << " Hz";
return INVALID_FREQUENCY;
}
}
return freq_hz;
#elif defined(_WIN32)
LARGE_INTEGER freq;
QueryPerformanceFrequency(&freq);
return freq.QuadPart;
#else
return INVALID_FREQUENCY;
#endif
}
ICpuUtilsHelper& CpuUtils::GetCpuUtilsHelperSingletonInstance() {
static absl::once_flag flag;
absl::call_once(flag, []() {
if (cpu_utils_helper_instance_ != nullptr) {
LOG(FATAL) << "cpu_utils_helper_instance_ is already instantiated.";
}
#if defined(__ANDROID__) && (__ANDROID_API__ >= 21) && \
(defined(__ARM_ARCH_7A__) || defined(__aarch64__))
cpu_utils_helper_instance_ = new AndroidArmV7ACpuUtilsHelper();
#else
cpu_utils_helper_instance_ = new DefaultCpuUtilsHelper();
#endif
});
return *cpu_utils_helper_instance_;
}
}
} | #include "tsl/platform/profile_utils/cpu_utils.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/profile_utils/clock_cycle_profiler.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profile_utils {
static constexpr bool DBG = false;
class CpuUtilsTest : public ::testing::Test {
protected:
void SetUp() override { CpuUtils::EnableClockCycleProfiling(); }
};
TEST_F(CpuUtilsTest, SetUpTestCase) {}
TEST_F(CpuUtilsTest, TearDownTestCase) {}
TEST_F(CpuUtilsTest, CheckGetCurrentClockCycle) {
static constexpr int LOOP_COUNT = 10;
const uint64 start_clock_count = CpuUtils::GetCurrentClockCycle();
CHECK_GT(start_clock_count, 0);
uint64 prev_clock_count = start_clock_count;
for (int i = 0; i < LOOP_COUNT; ++i) {
const uint64 clock_count = CpuUtils::GetCurrentClockCycle();
CHECK_GE(clock_count, prev_clock_count);
prev_clock_count = clock_count;
}
const uint64 end_clock_count = CpuUtils::GetCurrentClockCycle();
if (DBG) {
LOG(INFO) << "start clock = " << start_clock_count;
LOG(INFO) << "end clock = " << end_clock_count;
LOG(INFO) << "average clock = "
<< ((end_clock_count - start_clock_count) / LOOP_COUNT);
}
}
TEST_F(CpuUtilsTest, CheckCycleCounterFrequency) {
#if (defined(__powerpc__) || \
defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
(defined(__s390x__))
const uint64 cpu_frequency = CpuUtils::GetCycleCounterFrequency();
CHECK_GT(cpu_frequency, 0);
CHECK_NE(cpu_frequency, unsigned(CpuUtils::INVALID_FREQUENCY));
#else
const int64_t cpu_frequency = CpuUtils::GetCycleCounterFrequency();
CHECK_GT(cpu_frequency, 0);
CHECK_NE(cpu_frequency, CpuUtils::INVALID_FREQUENCY);
#endif
if (DBG) {
LOG(INFO) << "Cpu frequency = " << cpu_frequency;
}
}
TEST_F(CpuUtilsTest, CheckMicroSecPerClock) {
const double micro_sec_per_clock = CpuUtils::GetMicroSecPerClock();
CHECK_GT(micro_sec_per_clock, 0.0);
if (DBG) {
LOG(INFO) << "Micro sec per clock = " << micro_sec_per_clock;
}
}
TEST_F(CpuUtilsTest, SimpleUsageOfClockCycleProfiler) {
static constexpr int LOOP_COUNT = 10;
ClockCycleProfiler prof;
for (int i = 0; i < LOOP_COUNT; ++i) {
prof.Start();
prof.Stop();
}
EXPECT_EQ(LOOP_COUNT, static_cast<int>(prof.GetCount() + 0.5));
if (DBG) {
prof.DumpStatistics("CpuUtilsTest");
}
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/profile_utils/cpu_utils.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/profile_utils/cpu_utils_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
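The Linux branch of `GetCycleCounterFrequencyImpl` above derives the frequency from `/proc/cpuinfo`. A minimal sketch of that parsing step in isolation, assuming the x86 `bogomips` format where BogoMIPS is roughly twice the clock (the `ParseFrequencyHz` helper is hypothetical):

```cpp
#include <cstdint>
#include <cstdio>

// Parses a "bogomips : <value>" line and converts it to Hz, using the same
// freq_factor of 2.0 as the code above. Returns -1 on a non-matching line.
int64_t ParseFrequencyHz(const char* line) {
  double bogomips = 0.0;
  if (std::sscanf(line, "bogomips : %lf", &bogomips) != 1) return -1;
  const double freq_ghz = bogomips / 1000.0 / 2.0;
  return static_cast<int64_t>(freq_ghz * 1000.0 * 1000.0 * 1000.0);
}

// ParseFrequencyHz("bogomips : 4800.00") == 2400000000, i.e. 2.4 GHz.
```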
78838912-1249-42c4-bc35-dbab34023c54 | cpp | tensorflow/tensorflow | async_value | third_party/xla/xla/tsl/concurrency/async_value.cc | third_party/xla/xla/tsl/concurrency/async_value_test.cc | #include "xla/tsl/concurrency/async_value.h"
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/span.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/logging.h"
namespace tsl {
class NotifierListNode {
public:
explicit NotifierListNode(absl::AnyInvocable<void()> notification)
: next_(nullptr), notification_(std::move(notification)) {}
private:
friend class AsyncValue;
NotifierListNode* next_;
absl::AnyInvocable<void()> notification_;
};
uint16_t AsyncValue::CreateTypeInfoAndReturnTypeIdImpl(
const TypeInfo& type_info) {
size_t type_id = GetTypeInfoTableSingleton()->emplace_back(type_info) + 1;
DCHECK(type_id < std::numeric_limits<uint16_t>::max())
<< "Too many different AsyncValue types.";
return type_id;
}
AsyncValue::TypeInfoTable* AsyncValue::GetTypeInfoTableSingleton() {
constexpr int kInitialCapacity = 64;
static auto* type_info_table = new TypeInfoTable(kInitialCapacity);
return type_info_table;
}
std::atomic<size_t> AsyncValue::total_allocated_async_values_;
void AsyncValue::NotifyAvailable(State available_state) {
DCHECK((kind() == Kind::kConcrete || kind() == Kind::kIndirect))
<< "Should only be used by ConcreteAsyncValue or IndirectAsyncValue";
DCHECK(available_state == State::kConcrete ||
available_state == State::kError);
auto old_value = waiters_and_state_.exchange(
WaitersAndState(nullptr, available_state), std::memory_order_acq_rel);
DCHECK(old_value.state() == State::kUnconstructed ||
old_value.state() == State::kConstructed);
RunWaiters(old_value.waiter());
}
void AsyncValue::RunWaiters(NotifierListNode* list) {
while (list) {
NotifierListNode* node = list;
node->notification_();
list = node->next_;
delete node;
}
}
void AsyncValue::EnqueueWaiter(absl::AnyInvocable<void()> waiter,
WaitersAndState old_value) {
auto* node = new NotifierListNode(std::move(waiter));
auto old_state = old_value.state();
node->next_ = old_value.waiter();
auto new_value = WaitersAndState(node, old_state);
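  // Publish the new list head with a CAS loop: on failure, either another
  // waiter was enqueued (retry against the updated head) or the value became
  // available, in which case run the notification inline and bail out.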
while (!waiters_and_state_.compare_exchange_weak(old_value, new_value,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
if (old_value.state() == State::kConcrete ||
old_value.state() == State::kError) {
DCHECK(old_value.waiter() == nullptr);
node->notification_();
delete node;
return;
}
node->next_ = old_value.waiter();
}
DCHECK(old_value.state() == State::kUnconstructed ||
old_value.state() == State::kConstructed);
}
void AsyncValue::SetError(absl::Status status) {
DCHECK(!status.ok());
if (kind() == Kind::kConcrete) {
GetTypeInfo().set_error(this, std::move(status));
} else {
DCHECK(kind() == Kind::kIndirect);
auto error_av = MakeErrorAsyncValueRef(std::move(status));
static_cast<IndirectAsyncValue*>(this)->ForwardTo(std::move(error_av));
}
}
void IndirectAsyncValue::ForwardTo(RCReference<AsyncValue> value) {
DCHECK(IsUnavailable());
auto s = value->state();
if (s == State::kConcrete || s == State::kError) {
DCHECK(!value_) << "IndirectAsyncValue::ForwardTo is called more than once";
auto* concrete_value = value.release();
if (concrete_value->kind() == Kind::kIndirect) {
auto* indirect_value = static_cast<IndirectAsyncValue*>(concrete_value);
concrete_value = indirect_value->value_;
DCHECK(concrete_value != nullptr);
DCHECK(concrete_value->kind() == Kind::kConcrete);
concrete_value->AddRef();
indirect_value->DropRef();
}
DCHECK(type_id_ == kUnknownTypeId || type_id_ == concrete_value->type_id_ ||
concrete_value->IsType<DummyValueForErrorAsyncValue>())
<< "IndirectAsyncValue::ForwardTo value has an unexpected type id";
value_ = concrete_value;
type_id_ = concrete_value->type_id_;
NotifyAvailable(s);
} else {
AsyncValue* av = value.get();
av->AndThen([self = FormRef(this), value = std::move(value)]() mutable {
self->ForwardTo(std::move(value));
});
}
}
void BlockUntilReady(AsyncValue* async_value) {
if (ABSL_PREDICT_TRUE(async_value->IsAvailable())) return;
absl::BlockingCounter cnt(1);
async_value->AndThen([&] { cnt.DecrementCount(); });
cnt.Wait();
}
void RunWhenReady(absl::Span<AsyncValue* const> values,
absl::AnyInvocable<void()> callee) {
absl::InlinedVector<AsyncValue*, 4> unavailable_values;
for (auto i : values) {
if (!i->IsAvailable()) unavailable_values.push_back(i);
}
if (unavailable_values.empty()) return callee();
if (unavailable_values.size() == 1) {
unavailable_values[0]->AndThen(
[callee = std::move(callee)]() mutable { callee(); });
return;
}
struct CounterAndCallee {
std::atomic<size_t> counter;
absl::AnyInvocable<void()> callee;
};
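  // Heap-allocate the counter + callee so the last waiter to decrement the
  // counter (fetch_sub returns the pre-decrement value, hence `!= 1`) can
  // run the callee and free the shared state.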
auto* data =
new CounterAndCallee{{unavailable_values.size()}, std::move(callee)};
for (auto* val : unavailable_values) {
val->AndThen([data]() {
if (data->counter.fetch_sub(1) != 1) return;
data->callee();
delete data;
});
}
}
void RunWhenReady(absl::Span<RCReference<AsyncValue> const> values,
absl::AnyInvocable<void()> callee) {
absl::InlinedVector<AsyncValue*, 8> pointers;
pointers.reserve(values.size());
for (const auto& ref : values) {
pointers.push_back(ref.get());
}
RunWhenReady(pointers, std::move(callee));
}
} | #include "xla/tsl/concurrency/async_value.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(AsyncValueTest, ConstructedToError) {
AsyncValue* value = MakeConstructedAsyncValueRef<int32_t>(123).release();
bool callback_triggered = false;
EXPECT_TRUE(value->IsConstructed());
EXPECT_FALSE(value->IsConcrete());
EXPECT_FALSE(value->IsAvailable());
value->AndThen([&] { callback_triggered = true; });
EXPECT_FALSE(callback_triggered);
value->SetError(absl::InternalError("test error"));
EXPECT_TRUE(callback_triggered);
EXPECT_TRUE(value->IsAvailable());
EXPECT_FALSE(value->IsConcrete());
EXPECT_TRUE(value->IsError());
value->DropRef();
}
TEST(AsyncValueTest, ConstructedToConcrete) {
AsyncValue* value = MakeConstructedAsyncValueRef<int32_t>(123).release();
EXPECT_TRUE(value->IsConstructed());
EXPECT_FALSE(value->IsConcrete());
EXPECT_FALSE(value->IsAvailable());
value->AndThen([] {});
value->SetStateConcrete();
EXPECT_TRUE(value->IsAvailable());
EXPECT_TRUE(value->IsConcrete());
EXPECT_FALSE(value->IsError());
EXPECT_EQ(123, value->get<int32_t>());
value->DropRef();
}
TEST(AsyncValueTest, UnconstructedEmplace) {
AsyncValue* value = MakeUnconstructedAsyncValueRef<int32_t>().release();
EXPECT_FALSE(value->IsConstructed());
EXPECT_FALSE(value->IsConcrete());
EXPECT_FALSE(value->IsAvailable());
value->AndThen([] {});
value->emplace<int32_t>(123);
EXPECT_FALSE(value->IsConstructed());
EXPECT_TRUE(value->IsAvailable());
EXPECT_TRUE(value->IsConcrete());
EXPECT_EQ(123, value->get<int32_t>());
value->DropRef();
}
TEST(AsyncValueTest, AddAndDropRef) {
AsyncValue* value = MakeConstructedAsyncValueRef<int32_t>(123).release();
value->AndThen([] {});
value->SetStateConcrete();
EXPECT_TRUE(value->IsConcrete());
EXPECT_TRUE(value->IsUnique());
value->AddRef();
EXPECT_FALSE(value->IsUnique());
EXPECT_EQ(123, value->get<int32_t>());
value->DropRef();
EXPECT_TRUE(value->IsUnique());
value->DropRef();
}
TEST(AsyncValueTest, KeepPayloadOnError) {
int payload_value = 0;
struct Payload : AsyncPayload::KeepOnError {
explicit Payload(int* value) : value{value} { *value = 1; }
~Payload() { *value = 2; }
int* value;
};
{
AsyncValueRef<Payload> value =
MakeConstructedAsyncValueRef<Payload>(&payload_value);
EXPECT_EQ(1, *value->value);
value.SetStateConcrete();
EXPECT_EQ(1, *value->value);
EXPECT_TRUE(!value.IsError());
}
EXPECT_EQ(2, payload_value);
{
AsyncValueRef<Payload> value =
MakeConstructedAsyncValueRef<Payload>(&payload_value);
EXPECT_TRUE(!value.IsError());
value.SetError(absl::InternalError("error"));
EXPECT_EQ(1, *value->value);
EXPECT_TRUE(value.IsError());
EXPECT_EQ("error", value.GetError().message());
}
EXPECT_EQ(2, payload_value);
}
TEST(AsyncValueTest, StackAllocatedAsyncValue) {
int32_t counter = 0;
class Payload {
public:
explicit Payload(int32_t& counter) : counter_{counter} { counter_++; }
~Payload() { counter_++; }
int32_t count() const { return counter_; }
private:
int32_t& counter_;
};
internal::AsyncValueStorage<Payload> storage;
AsyncValueOwningRef<Payload> owner =
MakeConstructedAsyncValueRef<Payload>(storage, counter);
AsyncValuePtr<Payload> ptr = owner.AsPtr();
AsyncValue* value = ptr.value();
EXPECT_TRUE(value->IsConstructed());
EXPECT_FALSE(value->IsAvailable());
EXPECT_EQ(1, counter);
EXPECT_EQ(1, ptr->count());
ptr.SetStateConcrete();
EXPECT_TRUE(ptr.IsAvailable());
std::make_unique<AsyncValueOwningRef<Payload>>(std::move(owner));
EXPECT_EQ(2, counter);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
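The test file above exercises the `AsyncValue` state machine through `AsyncValueRef`. A minimal usage sketch of the same lifecycle — construct, register a continuation, publish — assuming the XLA/TSL headers used by the test:

```cpp
#include <cstdint>

#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/async_value_ref.h"

void Example() {
  // Payload is constructed eagerly but not yet visible to waiters.
  tsl::AsyncValueRef<int32_t> value =
      tsl::MakeConstructedAsyncValueRef<int32_t>(42);

  // Continuations queue up until the value becomes available.
  value.AndThen([] { /* runs once the value is concrete or an error */ });

  // Publishing flips the state and drains the waiter list (NotifyAvailable).
  value.SetStateConcrete();

  // Returns immediately since the value is already available.
  tsl::BlockUntilReady(value.GetAsyncValue());
}
```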
3028451e-22e9-4606-b27f-2af99eba21ae | cpp | tensorflow/tensorflow | node_def_util | tensorflow/core/framework/node_def_util.cc | tensorflow/core/framework/node_def_util_test.cc | #include "tensorflow/core/framework/node_def_util.h"
#include <algorithm>
#include <unordered_map>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/scanner.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
const char* const kColocationAttrName = "_class";
const char* const kColocationGroupPrefix = "loc:@";
const char* const kTpuExecuteStagingOp = "IdentityN";
const char* const kTpuExecuteStagingNodeName = "_variable_copy";
AttrSlice::AttrSlice() : ndef_(nullptr) {
static const AttrValueMap* const kEmptyAttrValueMap = new AttrValueMap;
attrs_ = kEmptyAttrValueMap;
}
AttrSlice::AttrSlice(const NodeDef& node_def)
: ndef_(&node_def), attrs_(nullptr) {}
AttrSlice::AttrSlice(const AttrValueMap* a) : ndef_(nullptr), attrs_(a) {}
string SummarizeAttrsHelper(AttrSlice attrs, StringPiece device) {
string ret;
std::vector<string> attr_names;
attr_names.reserve(attrs.size());
for (const auto& attr : attrs) {
attr_names.push_back(attr.first);
}
std::sort(attr_names.begin(), attr_names.end());
bool first = true;
for (const string& attr_name : attr_names) {
if (!first) strings::StrAppend(&ret, ", ");
first = false;
strings::StrAppend(&ret, attr_name, "=",
SummarizeAttrValue(*attrs.Find(attr_name)));
}
if (!device.empty()) {
if (!first) strings::StrAppend(&ret, ", ");
first = false;
strings::StrAppend(&ret, "_device=\"", device, "\"");
}
return ret;
}
string AttrSlice::SummarizeNode() const {
return ndef_ ? SummarizeNodeDef(*ndef_)
: strings::StrCat(
"[", SummarizeAttrsHelper(*this, StringPiece()), "]");
}
string AttrSlice::DebugString() const {
std::vector<string> attr_key_vals;
attr_key_vals.reserve(attrs()->size());
for (const auto& it : *this) {
const string& name = it.first;
const AttrValue& attr_value = it.second;
attr_key_vals.push_back(
absl::StrCat(name, "=", SummarizeAttrValue(attr_value)));
}
return absl::StrJoin(attr_key_vals, ", ");
}
string SummarizeNodeDef(const NodeDef& node_def, int max_inputs_in_summary) {
string ret = strings::StrCat(errors::FormatNodeNameForError(node_def.name()),
" = ", node_def.op(), "[");
strings::StrAppend(&ret, SummarizeAttrsHelper(node_def, node_def.device()));
strings::StrAppend(&ret, "](");
bool first = true;
for (const string& input : node_def.input()) {
if (!first) strings::StrAppend(&ret, ", ");
first = false;
if (max_inputs_in_summary-- == 0) {
strings::StrAppend(&ret, "...");
break;
}
strings::StrAppend(&ret, input);
}
strings::StrAppend(&ret, ")");
return ret;
}
string SummarizeAttrs(const NodeDef& node_def) {
return SummarizeAttrsHelper(node_def, node_def.device());
}
string FormatNodeDefForError(
StringPiece node_name, bool has_experimental_debug_info,
const NodeDef_ExperimentalDebugInfo& experimental_debug_info) {
return !has_experimental_debug_info ||
experimental_debug_info.original_node_names().empty()
? errors::FormatNodeNameForError(string(node_name))
: errors::FormatOriginalNodeLocationForError(
experimental_debug_info.original_node_names(),
experimental_debug_info.original_func_names());
}
string FormatNodeDefForError(const NodeDef& node_def) {
return FormatNodeDefForError(node_def.name(),
node_def.has_experimental_debug_info(),
node_def.experimental_debug_info());
}
const AttrValue* AttrSlice::Find(StringPiece attr_name) const {
for (const auto& attr : *attrs()) {
if (attr.first == attr_name) {
return &attr.second;
}
}
return nullptr;
}
const AttrValue* AttrSlice::FindByString(const string& attr_name) const {
auto iter = attrs()->find(attr_name);
if (iter != attrs()->end()) {
return &iter->second;
} else {
return nullptr;
}
}
Status AttrSlice::CheckFind(StringPiece attr_name,
const AttrValue* attr_value) const {
if (attr_value != nullptr) {
return absl::OkStatus();
}
Status s = errors::NotFound("No attr named '", attr_name, "' in NodeDef:");
if (!absl::StartsWith(attr_name, "_") && ndef_ != nullptr) {
s = AttachDef(s, *ndef_);
}
return s;
}
Status AttrSlice::Find(StringPiece attr_name,
const AttrValue** attr_value) const {
*attr_value = Find(attr_name);
return CheckFind(attr_name, *attr_value);
}
Status AttrSlice::FindByString(const string& attr_name,
const AttrValue** attr_value) const {
*attr_value = FindByString(attr_name);
return CheckFind(attr_name, *attr_value);
}
bool AttrSlice::EqualAttrs(AttrSlice other, Scratch* scratch) const {
if (size() != other.size()) return false;
for (const auto& attr : *other.attrs()) {
auto iter = attrs()->find(attr.first);
if (iter == attrs()->end()) return false;
iter->second.SerializeToString(&scratch->a);
attr.second.SerializeToString(&scratch->b);
if (scratch->a != scratch->b) return false;
}
return true;
}
#define DEFINE_GET_ATTR(TYPE, FIELD, ATTR_TYPE, APPEND_OP, CAST, ...) \
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
TYPE* value) { \
const AttrValue* attr_value; \
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value)); \
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, ATTR_TYPE)); \
const auto& v = attr_value->FIELD(); \
__VA_ARGS__; \
*value = CAST; \
return OkStatus(); \
} \
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
std::vector<TYPE>* value) { \
const AttrValue* attr_value; \
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value)); \
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "list(" ATTR_TYPE ")")); \
value->reserve(attr_value->list().FIELD().size()); \
for (const auto& v : attr_value->list().FIELD()) { \
__VA_ARGS__; \
value->APPEND_OP(CAST); \
} \
return OkStatus(); \
}
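// Example: DEFINE_GET_ATTR(float, f, "float", emplace_back, v, ;) expands to
// GetNodeAttr overloads for `float` and `std::vector<float>` that locate the
// attr, verify it holds a "float" (resp. "list(float)") value, and copy it
// out.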
#define DEFINE_TRY_GET_ATTR(TYPE, FIELD, ATTR_TYPE, APPEND_OP, CAST, ...) \
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
TYPE* value) { \
const AttrValue* attr_value = attrs.Find(attr_name); \
if (attr_value == nullptr) { \
return false; \
} \
Status s = AttrValueHasType(*attr_value, ATTR_TYPE); \
if (!s.ok()) { \
return false; \
} \
const auto& v = attr_value->FIELD(); \
__VA_ARGS__; \
*value = CAST; \
return true; \
} \
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
std::vector<TYPE>* value) { \
const AttrValue* attr_value = attrs.Find(attr_name); \
if (attr_value == nullptr) { \
return false; \
} \
Status s = AttrValueHasType(*attr_value, "list(" ATTR_TYPE ")"); \
if (!s.ok()) { \
return false; \
} \
value->reserve(attr_value->list().FIELD().size()); \
for (const auto& v : attr_value->list().FIELD()) { \
__VA_ARGS__; \
value->APPEND_OP(CAST); \
} \
return true; \
}
DEFINE_GET_ATTR(tstring, s, "string", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(tstring, s, "string", emplace_back, v, ;)
DEFINE_GET_ATTR(string, s, "string", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(string, s, "string", emplace_back, v, ;)
DEFINE_GET_ATTR(int64_t, i, "int", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(int64_t, i, "int", emplace_back, v, ;)
DEFINE_GET_ATTR(
int32, i, "int", emplace_back, static_cast<int32>(v),
if (static_cast<int64_t>(static_cast<int32>(v)) != v) {
return errors::InvalidArgument("Attr ", attr_name, " has value ", v,
" out of range for an int32");
})
DEFINE_TRY_GET_ATTR(
int32, i, "int", emplace_back, static_cast<int32>(v),
if (static_cast<int64_t>(static_cast<int32>(v)) != v) {
static int log_counter = 0;
if (log_counter < 10) {
log_counter++;
LOG(WARNING) << "Attr " << attr_name << " has value " << v
<< " out of range for an int32";
}
return false;
})
DEFINE_GET_ATTR(float, f, "float", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(float, f, "float", emplace_back, v, ;)
DEFINE_GET_ATTR(bool, b, "bool", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(bool, b, "bool", emplace_back, v, ;)
DEFINE_GET_ATTR(DataType, type, "type", emplace_back, static_cast<DataType>(v),
;)
DEFINE_TRY_GET_ATTR(DataType, type, "type", emplace_back,
static_cast<DataType>(v),
;)
DEFINE_GET_ATTR(TensorShapeProto, shape, "shape", emplace_back, v, ;)
DEFINE_GET_ATTR(TensorShape, shape, "shape", emplace_back, TensorShape(v),
TF_RETURN_IF_ERROR(TensorShape::IsValidShape(v));)
DEFINE_TRY_GET_ATTR(
TensorShape, shape, "shape", emplace_back, TensorShape(v),
if (!TensorShape::IsValidShape(v).ok()) {
static int log_counter = 0;
if (log_counter < 10) {
log_counter++;
LOG(WARNING) << "Attr " << attr_name << " has invalid shape value "
<< v.DebugString();
}
return false;
})
DEFINE_GET_ATTR(PartialTensorShape, shape, "shape", emplace_back,
PartialTensorShape(v),
TF_RETURN_IF_ERROR(PartialTensorShape::IsValidShape(v));)
DEFINE_GET_ATTR(
Tensor, tensor, "tensor", emplace_back, t, Tensor t; if (!t.FromProto(v)) {
return errors::InvalidArgument("Attr ", attr_name, " has value ",
v.ShortDebugString(),
" that can't be converted to a Tensor");
})
DEFINE_GET_ATTR(NameAttrList, func, "func", emplace_back, v, ;);
#undef DEFINE_GET_ATTR
bool HasNodeAttr(const NodeDef& node_def, StringPiece attr_name) {
return node_def.attr().find(string(attr_name)) != node_def.attr().end();
}
static const string& kEmptyString = *new string();
const string& GetNodeAttrString(const AttrSlice& attrs, StringPiece attr_name) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return kEmptyString;
}
Status s = AttrValueHasType(*attr_value, "string");
if (!s.ok()) {
return kEmptyString;
}
return attr_value->s();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
std::vector<const string*>* value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "list(string)");
if (!s.ok()) {
return false;
}
value->reserve(attr_value->list().s().size());
for (const auto& v : attr_value->list().s()) {
value->push_back(&v);
}
return true;
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
std::vector<const TensorShapeProto*>* value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "list(shape)");
if (!s.ok()) {
return false;
}
value->reserve(attr_value->list().shape().size());
for (const auto& v : attr_value->list().shape()) {
value->push_back(&v);
}
return true;
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
DataTypeVector* value) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "list(type)"));
for (const auto& v : attr_value->list().type()) {
value->push_back(static_cast<DataType>(v));
}
return absl::OkStatus();
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const TensorProto** value) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "tensor"));
*value = &attr_value->tensor();
return absl::OkStatus();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const TensorProto** value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "tensor");
if (!s.ok()) {
return false;
}
*value = &attr_value->tensor();
return true;
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const NameAttrList** value) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "func"));
*value = &attr_value->func();
return absl::OkStatus();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const NameAttrList** value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "func");
if (!s.ok()) {
return false;
}
*value = &attr_value->func();
return true;
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
Padding* value) {
string str_value;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, attr_name, &str_value));
return GetPaddingFromString(str_value, value);
}
namespace {
template <class NodeDefOrAttrSlice>
Status AddArgToSig(const NodeDefOrAttrSlice& node_or_attrs,
const OpDef::ArgDef& arg_def, DataTypeVector* sig) {
const int original_size = sig->size();
if (!arg_def.number_attr().empty()) {
int64_t repeats = -1;
TF_RETURN_IF_ERROR(
GetNodeAttr(node_or_attrs, arg_def.number_attr(), &repeats));
if (static_cast<int64_t>(static_cast<int32>(repeats)) != repeats) {
return errors::InvalidArgument("Number of outputs is too big: ", repeats);
}
if (repeats < 0) {
return errors::InvalidArgument("Value for number_attr() ", repeats,
" < 0");
}
if (!arg_def.type_attr().empty()) {
DataType dtype;
TF_RETURN_IF_ERROR(
GetNodeAttr(node_or_attrs, arg_def.type_attr(), &dtype));
for (int i = 0; i < repeats; ++i) {
sig->push_back(dtype);
}
} else if (arg_def.type() != DT_INVALID) {
for (int i = 0; i < repeats; ++i) {
sig->push_back(arg_def.type());
}
} else {
return errors::InvalidArgument("Missing type or type_attr field in ",
arg_def.ShortDebugString());
}
} else if (!arg_def.type_attr().empty()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(AttrSlice(node_or_attrs)
.FindByString(arg_def.type_attr(), &attr_value));
sig->push_back(attr_value->type());
} else if (!arg_def.type_list_attr().empty()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(
AttrSlice(node_or_attrs)
.FindByString(arg_def.type_list_attr(), &attr_value));
for (int dtype : attr_value->list().type()) {
sig->push_back(static_cast<DataType>(dtype));
}
} else if (arg_def.type() != DT_INVALID) {
sig->push_back(arg_def.type());
} else {
return errors::InvalidArgument("No type fields in ",
arg_def.ShortDebugString());
}
if (arg_def.is_ref()) {
for (size_t i = original_size; i < sig->size(); ++i) {
if (IsRefType((*sig)[i])) {
return errors::InvalidArgument(
"Requested reference to a reference type: ",
arg_def.ShortDebugString());
}
(*sig)[i] = MakeRefType((*sig)[i]);
}
}
return absl::OkStatus();
}
}
Status InputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
int input_port, DataType* input_type) {
DataTypeVector input_types;
for (const auto& arg : op_def.input_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &input_types));
int input_types_size = input_types.size();
if (input_types_size > input_port) {
const DataType dtype = input_types[input_port];
*input_type = dtype;
return absl::OkStatus();
}
}
return errors::InvalidArgument("Input ", input_port, " not found for node ",
node_def.name());
}
Status InputTypesForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector* inputs) {
for (const auto& arg : op_def.input_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, inputs));
}
return absl::OkStatus();
}
Status OutputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
int output_port, DataType* output_type) {
DataTypeVector output_types;
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &output_types));
int output_types_size = output_types.size();
if (output_types_size > output_port) {
const DataType dtype = output_types[output_port];
*output_type = dtype;
return absl::OkStatus();
}
}
return errors::InvalidArgument("Output ", output_port, " not found for node ",
node_def.name());
}
Status OutputTypesForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector* outputs) {
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, outputs));
}
return absl::OkStatus();
}
Status OutputTypesForNode(const AttrSlice& attrs, const OpDef& op_def,
DataTypeVector* outputs) {
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(attrs, arg, outputs));
}
return absl::OkStatus();
}
Status InOutTypesForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector* inputs, DataTypeVector* outputs) {
TF_RETURN_IF_ERROR(InputTypesForNode(node_def, op_def, inputs));
return OutputTypesForNode(node_def, op_def, outputs);
}
Status NumOutputsForNode(const NodeDef& node_def, const OpDef& op_def,
int* num_outputs) {
DataTypeVector outputs;
TF_RETURN_IF_ERROR(OutputTypesForNode(node_def, op_def, &outputs));
*num_outputs = outputs.size();
return absl::OkStatus();
}
int OpPortIdToArgId(const NodeDef& node,
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args,
int port_id) {
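  // Expands repeated args (number_attr) and type-list args to their actual
  // lengths while scanning. Example: with args `a: N * T` (N = 2) and
  // `b: int32`, ports 0 and 1 map to arg 0 and port 2 maps to arg 1.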
for (int arg_id = 0; arg_id < args.size(); ++arg_id) {
if (port_id < 0) {
return -1;
} else if (port_id == 0) {
return arg_id;
}
int n = 1;
const auto& arg = args.Get(arg_id);
if (!arg.number_attr().empty()) {
n = node.attr().at(arg.number_attr()).i();
} else if (!arg.type_list_attr().empty()) {
n = node.attr().at(arg.type_list_attr()).list().type_size();
}
if (n < 0) {
DCHECK_GE(n, 0);
return -1;
} else if (port_id < n) {
return arg_id;
}
port_id -= n;
}
return -1;
}
Status ValidateNodeDef(const NodeDef& node_def, const OpDef& op_def) {
if (node_def.op() != op_def.name()) {
return errors::InvalidArgument(
"NodeDef op '", node_def.op(), "' does not match ",
SummarizeOpDef(op_def), "; NodeDef: ", FormatNodeDefForError(node_def));
}
bool seen_control = false;
size_t num_inputs = 0;
for (const string& input : node_def.input()) {
if (absl::StartsWith(input, "^")) {
seen_control = true;
if (input.find(':') != string::npos) {
return errors::InvalidArgument("Control input '", input,
"' must not have ':' in NodeDef: ",
FormatNodeDefForError(node_def));
}
} else if (seen_control) {
return errors::InvalidArgument("Non-control input '", input,
"' after control input in NodeDef: ",
FormatNodeDefForError(node_def));
} else {
++num_inputs;
}
}
std::unordered_map<string, const OpDef::AttrDef*> op_attrs;
for (const auto& attr : op_def.attr()) {
if (!gtl::InsertIfNotPresent(&op_attrs, attr.name(), &attr)) {
return errors::InvalidArgument("OpDef has duplicate attr name '",
attr.name(),
"': ", SummarizeOpDef(op_def));
}
}
for (const auto& attr : node_def.attr()) {
if (absl::StartsWith(attr.first, "_")) {
continue;
}
auto iter = op_attrs.find(attr.first);
if (iter == op_attrs.end()) {
LOG_EVERY_N_SEC(ERROR, 5)
<< "NodeDef mentions attribute " << attr.first
<< " which is not in the op definition: " << SummarizeOpDef(op_def)
<< " This may be expected if your graph generating binary is newer "
<< " than this binary. Unknown attributes will be ignored."
<< " NodeDef: " << FormatNodeDefForError(node_def);
continue;
}
if (attr.second.placeholder().empty()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
ValidateAttrValue(attr.second, *iter->second),
"; NodeDef: ", FormatNodeDefForError(node_def), "; ",
SummarizeOpDef(op_def));
}
op_attrs.erase(iter);
}
if (!op_attrs.empty()) {
string attrs;
for (const auto& attr_pair : op_attrs) {
if (!attrs.empty()) strings::StrAppend(&attrs, "', '");
strings::StrAppend(&attrs, attr_pair.first);
}
return errors::InvalidArgument(
"NodeDef missing attr", op_attrs.size() == 1 ? " '" : "s '", attrs,
"' from ", SummarizeOpDef(op_def),
"; NodeDef: ", FormatNodeDefForError(node_def));
}
DataTypeVector inputs, outputs;
TF_RETURN_IF_ERROR(InOutTypesForNode(node_def, op_def, &inputs, &outputs));
if (num_inputs != inputs.size()) {
return errors::InvalidArgument(
"NodeDef expected inputs '", DataTypeVectorString(inputs),
"' do not match ", num_inputs, " inputs specified; ",
SummarizeOpDef(op_def), "; NodeDef: ", FormatNodeDefForError(node_def));
}
return absl::OkStatus();
}
namespace {
Status ComputeArgRange(const AttrSlice& attrs, const OpDef::ArgDef& arg_def,
const OpDef& op_def, int* num) {
if (!arg_def.number_attr().empty()) {
return GetNodeAttr(attrs, arg_def.number_attr(), num);
} else if (!arg_def.type_list_attr().empty()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(arg_def.type_list_attr(), &attr_value));
*num = attr_value->list().type_size();
} else if (!arg_def.type_attr().empty() || arg_def.type() != DT_INVALID) {
*num = 1;
} else {
return errors::InvalidArgument(
"Argument '", arg_def.name(),
"' incorrectly specified in op definition: ", SummarizeOpDef(op_def));
}
return absl::OkStatus();
}
Status NameRangesHelper(const AttrSlice& attrs,
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args,
const OpDef& op_def, NameRangeMap* result) {
int start = 0;
int num;
for (const auto& arg : args) {
TF_RETURN_IF_ERROR(ComputeArgRange(attrs, arg, op_def, &num));
(*result)[arg.name()] = std::make_pair(start, start + num);
start += num;
}
return absl::OkStatus();
}
}
Status NameRangesForNode(const AttrSlice& attrs, const OpDef& op_def,
NameRangeMap* inputs, NameRangeMap* outputs) {
if (inputs != nullptr) {
TF_RETURN_IF_ERROR(
NameRangesHelper(attrs, op_def.input_arg(), op_def, inputs));
}
if (outputs != nullptr) {
return NameRangesHelper(attrs, op_def.output_arg(), op_def, outputs);
}
return absl::OkStatus();
}
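// Example: for an op with inputs `a: N * T` (N = 2) and `b: int32`, the
// resulting inputs map is {"a": (0, 2), "b": (2, 3)}, i.e. half-open
// port ranges.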
void AddDefaultsToNodeDef(const OpDef& op_def, NodeDef* node_def) {
for (const auto& attr_def : op_def.attr()) {
AttrSlice attrs(*node_def);
if (attr_def.has_default_value() && !attrs.Find(attr_def.name())) {
AddNodeAttr(attr_def.name(), attr_def.default_value(), node_def);
}
}
}
void StripDefaultsFromNodeDef(const OpDef& op_def, NodeDef* node_def) {
AttrSlice attrs(*node_def);
for (const auto& attr_def : op_def.attr()) {
if (attr_def.has_default_value()) {
const AttrValue* attr = attrs.Find(attr_def.name());
if (attr && AreAttrValuesEqual(*attr, attr_def.default_value()))
node_def->mutable_attr()->erase(attr_def.name());
}
}
}
namespace {
using ::tensorflow::tstring;
using ::tensorflow::strings::Scanner;
bool IsValidNodeName(StringPiece sp) {
Scanner scanner(sp);
scanner.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scanner.GetResult())
return false;
if (scanner.empty())
return true;
scanner.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
bool IsValidDataInputName(StringPiece sp) {
Scanner scan(sp);
scan.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scan.GetResult())
return false;
if (scan.empty())
return true;
if (scan.Peek() == ':') {
scan.OneLiteral(":");
if (scan.Peek() == '0') {
scan.OneLiteral("0");
} else {
scan.Many(Scanner::DIGIT);
}
} else {
scan.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
}
bool IsValidControlInputName(StringPiece sp) {
Scanner scan(sp);
scan.OneLiteral("^")
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scan.GetResult())
return false;
if (scan.empty())
return true;
scan.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
const StringPiece kColocationGroupPrefixStringPiece(kColocationGroupPrefix);
}
Status ValidateOpInput(const string& input_name, bool* is_control_input) {
*is_control_input = false;
if (IsValidDataInputName(input_name)) {
return absl::OkStatus();
} else if (IsValidControlInputName(input_name)) {
*is_control_input = true;
return absl::OkStatus();
} else {
return errors::InvalidArgument("Illegal op input name '", input_name, "'");
}
}
Status ValidateNodeName(const string& node_name) {
if (IsValidNodeName(node_name)) {
return absl::OkStatus();
} else {
return errors::InvalidArgument("Illegal op name '", node_name, "'");
}
}
Status ValidateExternalNodeDefSyntax(const NodeDef& node_def) {
Status s = ValidateNodeName(node_def.name());
if (!s.ok()) {
return AttachDef(s, node_def);
}
bool in_control_inputs = false;
for (const string& input_name : node_def.input()) {
bool is_control_input;
s = ValidateOpInput(input_name, &is_control_input);
if (!s.ok()) {
return AttachDef(s, node_def);
}
if (in_control_inputs && !is_control_input) {
return AttachDef(errors::InvalidArgument(
"All control inputs must follow all data inputs"),
node_def);
}
in_control_inputs = is_control_input;
}
return absl::OkStatus();
}
Status AttachDef(const Status& status, const NodeDef& node_def,
bool allow_multiple_formatted_node) {
string node_error;
if (!allow_multiple_formatted_node &&
absl::StrContains(status.message(), "{{node ")) {
node_error = node_def.name();
} else {
node_error = FormatNodeDefForError(node_def);
}
return errors::CreateWithUpdatedMessage(
status,
strings::StrCat(status.message(), "\n\t", " [[", node_error, "]]"));
}
void AddNodeAttr(StringPiece name, const AttrValue& value, NodeDef* node_def) {
node_def->mutable_attr()->insert(
AttrValueMap::value_type(string(name), value));
}
void AddNodeAttr(StringPiece name, AttrValue&& value, NodeDef* node_def) {
(*node_def->mutable_attr())[string(name)] = std::move(value);
}
#define ADD_NODE_ATTR(T) \
void AddNodeAttr(StringPiece name, T value, NodeDef* node_def) { \
AttrValue attr_value; \
SetAttrValue(value, &attr_value); \
AddNodeAttr(name, attr_value, node_def); \
}
ADD_NODE_ATTR(StringPiece)
ADD_NODE_ATTR(const char*)
ADD_NODE_ATTR(int32_t)
ADD_NODE_ATTR(int64_t)
ADD_NODE_ATTR(float)
ADD_NODE_ATTR(double)
ADD_NODE_ATTR(bool)
ADD_NODE_ATTR(DataType)
ADD_NODE_ATTR(const PartialTensorShape&)
ADD_NODE_ATTR(const Tensor&)
ADD_NODE_ATTR(const TensorProto&)
ADD_NODE_ATTR(const NameAttrList&)
ADD_NODE_ATTR(absl::Span<const StringPiece>)
ADD_NODE_ATTR(absl::Span<const char* const>)
ADD_NODE_ATTR(absl::Span<const string>)
ADD_NODE_ATTR(absl::Span<const int32>)
ADD_NODE_ATTR(absl::Span<const int64_t>)
ADD_NODE_ATTR(absl::Span<const float>)
ADD_NODE_ATTR(absl::Span<const bool>)
ADD_NODE_ATTR(const std::vector<bool>&)
ADD_NODE_ATTR(absl::Span<const DataType>)
ADD_NODE_ATTR(absl::Span<const TensorShape>)
ADD_NODE_ATTR(absl::Span<const PartialTensorShape>)
ADD_NODE_ATTR(absl::Span<const TensorShapeProto>)
ADD_NODE_ATTR(absl::Span<const Tensor>)
ADD_NODE_ATTR(absl::Span<const NameAttrList>)
#undef ADD_NODE_ATTR
void AddAttr(StringPiece name, const AttrValue& value, AttrValueMap* map) {
map->insert(AttrValueMap::value_type(string(name), value));
}
#define ADD_ATTR(T) \
void AddAttr(StringPiece name, T value, AttrValueMap* map) { \
AttrValue attr_value; \
SetAttrValue(value, &attr_value); \
AddAttr(name, attr_value, map); \
}
ADD_ATTR(bool)
#undef ADD_ATTR
Status AddPrefixAndSuffixToNode(StringPiece prefix, StringPiece suffix,
NodeDef* node_def, bool uniquify_frame_name) {
node_def->set_name(strings::StrCat(prefix, node_def->name(), suffix));
if (uniquify_frame_name &&
(node_def->op() == "Enter" || node_def->op() == "RefEnter")) {
string frame_name;
TF_RETURN_IF_ERROR(GetNodeAttr(*node_def, "frame_name", &frame_name));
AttrValue& attr = (*node_def->mutable_attr())["frame_name"];
frame_name = strings::StrCat(prefix, frame_name, suffix);
attr.set_s(frame_name);
}
return absl::OkStatus();
}
Status MaybeAddPrefixToColocationConstraints(
const std::unordered_set<string>& match, StringPiece prefix,
NodeDef* node_def) {
auto attr = node_def->mutable_attr()->find(kColocationAttrName);
if (attr == node_def->mutable_attr()->end()) {
return absl::OkStatus();
}
auto constraints_list = attr->second.mutable_list();
auto constraints_size = constraints_list->s_size();
for (size_t i = 0; i < constraints_size; ++i) {
StringPiece original(constraints_list->s(i));
if (absl::ConsumePrefix(&original, kColocationGroupPrefixStringPiece)) {
if (match.find(string(original)) != match.end()) {
(*constraints_list->mutable_s(i)) =
strings::StrCat(kColocationGroupPrefix, prefix, original);
}
}
}
return absl::OkStatus();
}
Status MaybeUpdateColocationConstraintsWithMap(
const std::map<absl::string_view, absl::string_view>& node_name_map,
NodeDef* node_def) {
auto attr = node_def->mutable_attr()->find(kColocationAttrName);
if (attr == node_def->mutable_attr()->end()) {
return absl::OkStatus();
}
auto constraints_list = attr->second.mutable_list();
auto constraints_size = constraints_list->s_size();
for (size_t i = 0; i < constraints_size; ++i) {
StringPiece original(constraints_list->s(i));
if (absl::ConsumePrefix(&original, kColocationGroupPrefixStringPiece)) {
if (node_name_map.find(original) != node_name_map.end()) {
(*constraints_list->mutable_s(i)) =
strings::StrCat(kColocationGroupPrefix, node_name_map.at(original));
}
}
}
return absl::OkStatus();
}
void ChangeToNoOp(NodeDef* node_def) {
node_def->set_op("NoOp");
node_def->clear_experimental_type();
}
} | #include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
OpDef ToOpDef(const OpDefBuilder& builder) {
OpRegistrationData op_reg_data;
TF_EXPECT_OK(builder.Finalize(&op_reg_data));
return op_reg_data.op_def;
}
NodeDef ToNodeDef(const string& text) {
NodeDef node_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def));
return node_def;
}
NodeDef ToNodeDef(NodeDefBuilder&& builder) {
NodeDef node_def;
TF_EXPECT_OK(builder.Finalize(&node_def));
return node_def;
}
void ExpectSuccess(const NodeDef& good, const OpDef& op_def) {
EXPECT_EQ(absl::OkStatus(), ValidateNodeDef(good, op_def))
<< "NodeDef: " << SummarizeNodeDef(good)
<< "; OpDef: " << SummarizeOpDef(op_def);
}
void ExpectFailure(const NodeDef& bad, const OpDef& op_def,
const string& message) {
Status status = ValidateNodeDef(bad, op_def);
EXPECT_FALSE(status.ok()) << "NodeDef: " << SummarizeNodeDef(bad)
<< "; OpDef: " << SummarizeOpDef(op_def);
if (status.ok()) return;
EXPECT_TRUE(errors::IsInvalidArgument(status))
<< status << "; NodeDef: " << SummarizeNodeDef(bad)
<< "; OpDef: " << SummarizeOpDef(op_def);
LOG(INFO) << "Message: " << status.message();
EXPECT_TRUE(absl::StrContains(status.ToString(), message))
<< "NodeDef: " << SummarizeNodeDef(bad)
<< "; OpDef: " << SummarizeOpDef(op_def) << "\nActual error: " << status
<< "\nDoes not contain: " << message;
}
TEST(NodeDefUtilTest, In) {
const OpDef op = ToOpDef(OpDefBuilder("In").Input("i: T").Attr("T: type"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'In'
input: 'a'
attr {
key: 'T'
value { type: DT_FLOAT }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = In[T=DT_FLOAT](a)", SummarizeNodeDef(node_def));
NodeDef bad = node_def;
bad.set_op("Wrong");
ExpectFailure(bad, op, "NodeDef op 'Wrong' does not match Op<name=In;");
bad = node_def;
bad.clear_attr();
ExpectFailure(bad, op, "NodeDef missing attr 'T' from Op<name=In;");
bad = node_def;
bad.clear_attr();
AddNodeAttr("T", 17, &bad);
ExpectFailure(
bad, op,
"AttrValue had value with type 'int' when 'type' expected\n\t for attr "
"'T'\n\t; NodeDef: ");
bad = node_def;
bad.add_input("b");
ExpectFailure(
bad, op,
"NodeDef expected inputs 'float' do not match 2 inputs specified;");
bad = node_def;
bad.clear_input();
ExpectFailure(
bad, op,
"NodeDef expected inputs 'float' do not match 0 inputs specified;");
NodeDef good = node_def;
good.add_input("^b");
ExpectSuccess(node_def, op);
bad = node_def;
bad.clear_input();
bad.add_input("^b");
bad.add_input("a");
ExpectFailure(bad, op,
"Non-control input 'a' after control input "
"in NodeDef:");
bad = node_def;
bad.add_input("^b:0");
ExpectFailure(bad, op, "Control input '^b:0' must not have ':' in NodeDef:");
}
TEST(NodeDefUtilTest, Out) {
const OpDef op =
ToOpDef(OpDefBuilder("Out").Output("o: T").Attr("T: numbertype"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'Out'
attr {
key: 'T'
value { type: DT_INT32 }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = Out[T=DT_INT32]()", SummarizeNodeDef(node_def));
NodeDef bad = node_def;
bad.clear_attr();
AddNodeAttr("T", DT_STRING, &bad);
ExpectFailure(bad, op,
"Value for attr 'T' of string is not in the list of allowed "
"values: float, double, int32, uint8, int16, int8, complex64, "
"int64, qint8, quint8, qint32, bfloat16, qint16, quint16, "
"uint16, complex128, "
"half, uint32, uint64");
}
TEST(NodeDefUtilTest, Enum) {
const OpDef op = ToOpDef(OpDefBuilder("Enum").Attr("e: {'apple','orange'}"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'Enum'
attr {
key: 'e'
value { s: 'apple' }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = Enum[e=\"apple\"]()", SummarizeNodeDef(node_def));
NodeDef good = node_def;
good.clear_attr();
AddNodeAttr("e", "orange", &good);
ExpectSuccess(good, op);
NodeDef bad = node_def;
bad.clear_attr();
AddNodeAttr("e", "foo", &bad);
ExpectFailure(bad, op,
"Value for attr 'e' of \"foo\" is not in the list of allowed "
"values: \"apple\", \"orange\"");
}
TEST(NodeDefUtilTest, SameIn) {
const OpDef op = ToOpDef(OpDefBuilder("SameIn")
.Input("i: N * T")
.Attr("N: int >= 2")
.Attr("T: {float,double}"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'SameIn'
input: 'a'
input: 'b'
attr {
key: 'N'
value { i: 2 }
}
attr {
key: 'T'
value { type: DT_DOUBLE }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = SameIn[N=2, T=DT_DOUBLE](a, b)",
SummarizeNodeDef(node_def));
NodeDef bad = ToNodeDef(R"pb(
name: 'n'
op: 'SameIn'
input: 'a'
input: 'b'
attr {
key: 'N'
value { i: 2 }
}
attr {
key: 'T'
value { type: DT_STRING }
}
)pb");
ExpectFailure(bad, op,
"Value for attr 'T' of string is not in the list of allowed "
"values: float, double");
bad = ToNodeDef(R"pb(
name: 'n'
op: 'SameIn'
input: 'a'
input: 'b'
attr {
key: 'N'
value { i: 1 }
}
attr {
key: 'T'
value { type: DT_FLOAT }
}
)pb");
ExpectFailure(bad, op, "Value for attr 'N' of 1 must be at least minimum 2");
}
TEST(NodeDefUtilTest, AnyIn) {
const OpDef op =
ToOpDef(OpDefBuilder("AnyIn").Input("i: T").Attr("T: list(type) >= 1"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = AnyIn[T=[DT_INT32, DT_STRING]](a, b)",
SummarizeNodeDef(node_def));
const NodeDef bad = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
attr {
key: 'T'
value { list {} }
}
)pb");
ExpectFailure(bad, op, "Length for attr 'T' of 0 must be at least minimum 1");
const NodeDef bad2 = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
attr {
key: 'T'
value {}
}
)pb");
ExpectFailure(bad2, op,
"Length for attr 'T' of 0 must be at least minimum 1");
}
TEST(NodeDefUtilTest, Device) {
const OpDef op_def1 = ToOpDef(OpDefBuilder("None"));
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("d", &op_def1).Device("/cpu:17")));
ExpectSuccess(node_def1, op_def1);
EXPECT_EQ("{{node d}} = None[_device=\"/cpu:17\"]()",
SummarizeNodeDef(node_def1));
const OpDef op_def2 = ToOpDef(OpDefBuilder("WithAttr").Attr("v: int"));
const NodeDef node_def2 = ToNodeDef(
std::move(NodeDefBuilder("d", &op_def2).Attr("v", 7).Device("/cpu:5")));
ExpectSuccess(node_def2, op_def2);
EXPECT_EQ("{{node d}} = WithAttr[v=7, _device=\"/cpu:5\"]()",
SummarizeNodeDef(node_def2));
}
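// Helpers for ValidateExternalNodeDefSyntax(), which checks node and input
// name syntax without consulting an OpDef.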
void ExpectValidSyntax(const NodeDef& good) {
EXPECT_EQ(absl::OkStatus(), ValidateExternalNodeDefSyntax(good))
<< "NodeDef: " << SummarizeNodeDef(good);
}
void ExpectInvalidSyntax(const NodeDef& bad, const string& message) {
Status status = ValidateExternalNodeDefSyntax(bad);
ASSERT_FALSE(status.ok()) << "NodeDef: " << SummarizeNodeDef(bad);
EXPECT_TRUE(errors::IsInvalidArgument(status))
<< status << "; NodeDef: " << SummarizeNodeDef(bad);
EXPECT_TRUE(absl::StrContains(StringPiece(status.ToString()), message))
<< "NodeDef: " << SummarizeNodeDef(bad) << ", " << status << ", "
<< message;
}
TEST(NodeDefUtilTest, ValidSyntax) {
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def);
const NodeDef node_def_namespace = ToNodeDef(R"pb(
name: 'n'
op: 'Project>AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_namespace);
const NodeDef node_def_explicit_inputs = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a:0'
input: 'b:123'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_explicit_inputs);
EXPECT_EQ("{{node n}} = AnyIn[T=[DT_INT32, DT_STRING]](a:0, b:123)",
SummarizeNodeDef(node_def_explicit_inputs));
const NodeDef node_def_explicit_inputs_namespace = ToNodeDef(R"pb(
name: 'Project>n'
op: 'Project>AnyIn'
input: 'Project>a:0'
input: 'Project>b:123'
input: '^Project>c'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_explicit_inputs_namespace);
EXPECT_EQ(
"{{node Project>n}} = Project>AnyIn[T=[DT_INT32, DT_STRING]]"
"(Project>a:0, Project>b:123, ^Project>c)",
SummarizeNodeDef(node_def_explicit_inputs_namespace));
const NodeDef node_def_partial_shape = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
attr {
key: 'shp'
value {
shape {
dim { size: -1 }
dim { size: 0 }
}
}
}
)pb");
ExpectValidSyntax(node_def_partial_shape);
const NodeDef node_def_control_input = ToNodeDef(R"pb(
name: 'n-'
op: 'AnyIn'
input: 'a'
input: '^b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_control_input);
const NodeDef node_def_invalid_name = ToNodeDef(R"pb(
name: 'n:0'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_invalid_name, "Illegal op name 'n:0'");
const NodeDef node_def_internal_name = ToNodeDef(R"pb(
name: '_n'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_internal_name, "Illegal op name '_n'");
const NodeDef node_def_slash_in_name = ToNodeDef(R"pb(
name: 'n\\'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_slash_in_name, "Illegal op name 'n\\'");
const NodeDef node_def_internal_input_name = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: '_a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_internal_input_name,
"Illegal op input name '_a'");
const NodeDef node_def_input_name_slash = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a\\'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_input_name_slash, "Illegal op input name 'a\\'");
const NodeDef node_def_invalid_control_input_name = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: '^b:0'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_invalid_control_input_name,
"Illegal op input name '^b:0'");
const NodeDef node_def_control_input_name_slash = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: '^b\\'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_control_input_name_slash,
"Illegal op input name '^b\\'");
const NodeDef node_def_data_input_after_control = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: '^a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_data_input_after_control,
"All control inputs must follow all data inputs");
const NodeDef node_def_data_input_invalid_port = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a:b'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_data_input_invalid_port,
"Illegal op input name 'a:b");
const NodeDef node_def_data_input_invalid_port2 = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a:00'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_data_input_invalid_port2,
"Illegal op input name 'a:00");
}
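// InputTypesForNode/InputTypeForNode resolve per-input dtypes from the OpDef;
// out-of-range indices must fail.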
TEST(InputTypesForNode, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
const NodeDef node_def = ToNodeDef(std::move(
NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
DataTypeVector types;
EXPECT_TRUE(InputTypesForNode(node_def, op_def, &types).ok());
EXPECT_EQ(types[0], DT_FLOAT);
EXPECT_EQ(types[1], DT_INT32);
DataType type;
EXPECT_TRUE(InputTypeForNode(node_def, op_def, 0, &type).ok());
EXPECT_EQ(type, DT_FLOAT);
EXPECT_TRUE(InputTypeForNode(node_def, op_def, 1, &type).ok());
EXPECT_EQ(type, DT_INT32);
EXPECT_FALSE(InputTypeForNode(node_def, op_def, 2, &type).ok());
}
TEST(OutputTypesForNode, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
const NodeDef node_def = ToNodeDef(std::move(
NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
DataTypeVector types;
EXPECT_TRUE(OutputTypesForNode(node_def, op_def, &types).ok());
EXPECT_EQ(types[0], DT_STRING);
EXPECT_EQ(types[1], DT_BOOL);
DataType type;
EXPECT_TRUE(OutputTypeForNode(node_def, op_def, 0, &type).ok());
EXPECT_EQ(type, DT_STRING);
EXPECT_TRUE(OutputTypeForNode(node_def, op_def, 1, &type).ok());
EXPECT_EQ(type, DT_BOOL);
EXPECT_FALSE(OutputTypeForNode(node_def, op_def, 2, &type).ok());
}
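// A num_split of 1e12 should make OutputTypesForNode fail, presumably to
// avoid materializing an enormous output type vector.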
TEST(OutputTypesForNode, LargeOutput) {
const OpDef op_def = ToOpDef(OpDefBuilder("TestSplitOp")
.Input("value: int64")
.Output("output: num_split * int64")
.Attr("num_split: int >= 1"));
int64_t num_split = 1000000000000;
const NodeDef node_def =
ToNodeDef(std::move(NodeDefBuilder("test_split_op", &op_def)
.Input(FakeInput())
.Attr("num_split", num_split)));
DataTypeVector types;
EXPECT_FALSE(OutputTypesForNode(node_def, op_def, &types).ok());
}
TEST(OutputTypesForNode_AttrSliceOverload, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
const AttrSlice attr_slice =
AttrSlice(ToNodeDef(std::move(NodeDefBuilder("simple", &op_def)
.Input(FakeInput())
.Input(FakeInput()))));
DataTypeVector types;
EXPECT_TRUE(OutputTypesForNode(attr_slice, op_def, &types).ok());
EXPECT_EQ(types[0], DT_STRING);
EXPECT_EQ(types[1], DT_BOOL);
}
TEST(NameRangesForNodeTest, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
NameRangeMap inputs, outputs;
const NodeDef node_def = ToNodeDef(std::move(
NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
TF_EXPECT_OK(NameRangesForNode(node_def, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 2}}}), outputs);
EXPECT_EQ("{{node simple}} = Simple[](a, b)", SummarizeNodeDef(node_def));
OpDef bad_op_def = op_def;
bad_op_def.mutable_input_arg(0)->clear_type();
EXPECT_FALSE(NameRangesForNode(node_def, bad_op_def, &inputs, &outputs).ok());
}
TEST(NameRangesForNodeTest, Polymorphic) {
const OpDef op_def = ToOpDef(OpDefBuilder("Polymorphic")
.Input("a: T")
.Input("b: T")
.Output("c: T")
.Attr("T: type"));
NameRangeMap inputs, outputs;
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("poly", &op_def)
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
EXPECT_EQ("{{node poly}} = Polymorphic[T=DT_INT32](a, b)",
SummarizeNodeDef(node_def1));
const NodeDef node_def2 =
ToNodeDef(std::move(NodeDefBuilder("poly", &op_def)
.Input(FakeInput(DT_BOOL))
.Input(FakeInput(DT_BOOL))));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
EXPECT_EQ("{{node poly}} = Polymorphic[T=DT_BOOL](a, b)",
SummarizeNodeDef(node_def2));
}
TEST(NameRangesForNodeTest, NRepeats) {
const OpDef op_def = ToOpDef(OpDefBuilder("NRepeats")
.Input("a: N * int32")
.Input("b: N * T")
.Output("c: T")
.Output("d: N * string")
.Output("e: M * bool")
.Attr("N: int")
.Attr("M: int")
.Attr("T: type"));
NameRangeMap inputs, outputs;
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("nr", &op_def)
.Input(FakeInput(4, DT_INT32))
.Input(FakeInput(4, DT_FLOAT))
.Attr("M", 3)));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 4}}, {"b", {4, 8}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 5}}, {"e", {5, 8}}}),
outputs);
EXPECT_EQ(
"{{node nr}} = NRepeats[M=3, N=4, T=DT_FLOAT](a, a:1, a:2, a:3, b, b:1, "
"b:2, b:3)",
SummarizeNodeDef(node_def1));
const NodeDef node_def2 =
ToNodeDef(std::move(NodeDefBuilder("nr", &op_def)
.Input(FakeInput(2, DT_INT32))
.Input(FakeInput(2, DT_DOUBLE))
.Attr("M", 7)));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 4}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
outputs);
EXPECT_EQ("{{node nr}} = NRepeats[M=7, N=2, T=DT_DOUBLE](a, a:1, b, b:1)",
SummarizeNodeDef(node_def2));
NodeDef bad_node_def = node_def2;
bad_node_def.clear_attr();
EXPECT_FALSE(NameRangesForNode(bad_node_def, op_def, &inputs, &outputs).ok());
}
TEST(NameRangesForNodeTest, TypeList) {
const OpDef op_def = ToOpDef(OpDefBuilder("TypeList")
.Input("a: T1")
.Input("b: T2")
.Output("c: T2")
.Output("d: T3")
.Output("e: T1")
.Attr("T1: list(type)")
.Attr("T2: list(type)")
.Attr("T3: list(type)"));
NameRangeMap inputs, outputs;
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("tl", &op_def)
.Input(FakeInput({DT_BOOL, DT_FLOAT}))
.Input(FakeInput(4, DT_FLOAT))
.Attr("T3", {DT_INT32, DT_DOUBLE, DT_STRING})));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 6}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 4}}, {"d", {4, 7}}, {"e", {7, 9}}}),
outputs);
EXPECT_EQ(
"{{node tl}} = TypeList[T1=[DT_BOOL, DT_FLOAT],"
" T2=[DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT],"
" T3=[DT_INT32, DT_DOUBLE, DT_STRING]](a, a:1, b, b:1, b:2, b:3)",
SummarizeNodeDef(node_def1));
const NodeDef node_def2 =
ToNodeDef(std::move(NodeDefBuilder("tl", &op_def)
.Input(FakeInput(7, DT_INT32))
.Input(FakeInput({DT_DOUBLE}))
.Attr("T3", {DT_DOUBLE, DT_STRING})));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 7}}, {"b", {7, 8}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
outputs);
EXPECT_EQ(
"{{node tl}} = TypeList[T1=[DT_INT32, DT_INT32, DT_INT32, DT_INT32, "
"DT_INT32,"
" DT_INT32, DT_INT32], T2=[DT_DOUBLE], T3=[DT_DOUBLE, DT_STRING]]"
"(a, a:1, a:2, a:3, a:4, a:5, a:6, b)",
SummarizeNodeDef(node_def2));
NodeDef bad_node_def = node_def2;
bad_node_def.clear_attr();
EXPECT_FALSE(NameRangesForNode(bad_node_def, op_def, &inputs, &outputs).ok());
}
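// AddPrefixAndSuffixToNode must also rewrite the frame_name attr of Enter
// nodes, not just the node name.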
TEST(AddPrefixAndSuffixToNode, Enter) {
NodeDef node_def;
node_def.set_name("enter");
node_def.set_op("Enter");
AddNodeAttr("frame_name", "test_frame", &node_def);
const string prefix = "prefix/";
const string suffix = "/suffix";
TF_ASSERT_OK(AddPrefixAndSuffixToNode(prefix, suffix, &node_def));
EXPECT_EQ("prefix/enter/suffix", node_def.name());
string frame_name;
TF_ASSERT_OK(GetNodeAttr(node_def, "frame_name", &frame_name));
EXPECT_EQ("prefix/test_frame/suffix", frame_name);
}
TEST(MaybeAddPrefixToColocationConstraints, Basic) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
AddNodeAttr(kColocationAttrName,
{strings::StrCat(kColocationGroupPrefix, "Node1"),
strings::StrCat(kColocationGroupPrefix, "Node2"),
strings::StrCat(kColocationGroupPrefix, "Node3")},
&node_def);
std::unordered_set<string> match;
match.insert("Node1");
match.insert("Node3");
TF_ASSERT_OK(MaybeAddPrefixToColocationConstraints(match, "fn/", &node_def));
std::vector<string> coloc_constraints;
TF_ASSERT_OK(GetNodeAttr(node_def, kColocationAttrName, &coloc_constraints));
EXPECT_EQ(
coloc_constraints,
std::vector<string>({"loc:@fn/Node1", "loc:@Node2", "loc:@fn/Node3"}));
}
TEST(MaybeAddPrefixToColocationConstraints, NoConstraints) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
std::unordered_set<string> match;
match.insert("Node1");
match.insert("Node3");
TF_ASSERT_OK(MaybeAddPrefixToColocationConstraints(match, "fn/", &node_def));
EXPECT_FALSE(HasNodeAttr(node_def, kColocationAttrName));
}
TEST(MaybeUpdateColocationConstraintsWithMap, Basic) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
AddNodeAttr(kColocationAttrName,
{strings::StrCat(kColocationGroupPrefix, "Node1"),
strings::StrCat(kColocationGroupPrefix, "Node2"),
strings::StrCat(kColocationGroupPrefix, "Node3")},
&node_def);
std::map<absl::string_view, absl::string_view> node_map;
node_map["Node1"] = "Node4";
node_map["Invalid"] = "Node5";
TF_ASSERT_OK(MaybeUpdateColocationConstraintsWithMap(node_map, &node_def));
std::vector<string> coloc_constraints;
TF_ASSERT_OK(GetNodeAttr(node_def, kColocationAttrName, &coloc_constraints));
EXPECT_EQ(coloc_constraints,
std::vector<string>({"loc:@Node4", "loc:@Node2", "loc:@Node3"}));
}
TEST(MaybeUpdateColocationConstraintsWithMap, NoConstraints) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
std::map<absl::string_view, absl::string_view> node_map;
node_map["Node1"] = "Node4";
node_map["Invalid"] = "Node5";
TF_ASSERT_OK(MaybeUpdateColocationConstraintsWithMap(node_map, &node_def));
EXPECT_FALSE(HasNodeAttr(node_def, kColocationAttrName));
}
TEST(FormatNodeForErrorTest, Node) {
Graph g(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("enter", "NoOp").Finalize(&g, &node));
EXPECT_EQ("{{node enter}}", FormatNodeForError(*node));
}
TEST(FormatNodeForErrorTest, NodeDef) {
NodeDef node_def;
node_def.set_name("enter");
node_def.set_op("Enter");
AddNodeAttr("frame_name", "test_frame", &node_def);
EXPECT_EQ("{{node enter}}", FormatNodeDefForError(node_def));
}
TEST(FormatNodeForErrorTest, NodeDefWithOriginalNames) {
NodeDef node_def;
node_def.set_name("enter");
node_def.set_op("Enter");
AddNodeAttr("frame_name", "test_frame", &node_def);
*(node_def.mutable_experimental_debug_info()->add_original_node_names()) =
"node_name";
*(node_def.mutable_experimental_debug_info()->add_original_func_names()) =
"func_name";
EXPECT_EQ("{{function_node func_name}}{{node node_name}}",
FormatNodeDefForError(node_def));
*(node_def.mutable_experimental_debug_info()->add_original_node_names()) =
"node_name2";
*(node_def.mutable_experimental_debug_info()->add_original_func_names()) =
"func_name2";
EXPECT_EQ(
"{{function_node func_name}}{{node node_name}}, "
"{{function_node func_name2}}{{node node_name2}}",
FormatNodeDefForError(node_def));
}
TEST(AttachDef, AllowMultipleFormattedNode) {
NodeDef a;
a.set_name("a");
NodeDef b;
b.set_name("b");
Status s = Status(absl::StatusCode::kCancelled, "Error");
Status s2 = AttachDef(s, a, true);
EXPECT_EQ("Error\n\t [[{{node a}}]]", s2.message());
Status s3 = AttachDef(s2, b, true);
EXPECT_EQ("Error\n\t [[{{node a}}]]\n\t [[{{node b}}]]", s3.message());
}
TEST(AttachDef, DisallowMultipleFormattedNode) {
NodeDef a;
a.set_name("a");
NodeDef b;
b.set_name("b");
Status s = Status(absl::StatusCode::kCancelled, "Error");
Status s2 = AttachDef(s, a, false);
EXPECT_EQ("Error\n\t [[{{node a}}]]", s2.message());
Status s3 = AttachDef(s2, b, false);
EXPECT_EQ("Error\n\t [[{{node a}}]]\n\t [[b]]", s3.message());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa7ea541-2e72-40b9-9fb8-7bf6fee453ec | cpp | tensorflow/tensorflow | horizontal_input_fusion | third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion.cc | third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion_test.cc | #include "xla/service/gpu/transforms/horizontal_input_fusion.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
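// Returns the shape of the first operand of the fusion's "real hero"
// (typically the reduction), or an empty Shape if the hero has no operands.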
Shape GetInputShapeForMultiOutputFusion(const HloInstruction& instr) {
const HloInstruction* real_hero = GetRealHeroForMultiOutputFusion(instr);
if (real_hero->operands().empty()) {
return Shape();
} else {
return real_hero->operand(0)->shape();
}
}
class HorizontalInputFusionImpl {
public:
explicit HorizontalInputFusionImpl(HloComputation* computation,
const se::DeviceDescription& d)
: computation_(computation), device_info_(d) {}
~HorizontalInputFusionImpl() = default;
absl::StatusOr<bool> Run();
private:
HloComputation* computation_;
const se::DeviceDescription& device_info_;
};
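// Orders shapes by rank, then lexicographically by dimension sizes. Note that
// it returns true when all dimensions compare equal; the one caller below
// only invokes it on shapes already known to differ.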
bool CompareShapeDimsFromLeftToRight(const Shape& shape_a,
const Shape& shape_b) {
if (shape_a.rank() != shape_b.rank()) {
return shape_a.rank() < shape_b.rank();
}
auto dims_a = shape_a.dimensions();
auto dims_b = shape_b.dimensions();
for (size_t i = 0; i < dims_a.size(); ++i) {
if (dims_a[i] != dims_b[i]) {
return dims_a[i] < dims_b[i];
}
}
return true;
}
std::vector<HloInstruction*> FindAndSortFusionCandidates(
HloInstruction* consumer) {
absl::flat_hash_set<HloInstruction*> fusion_instr_set;
std::vector<HloInstruction*> fusion_instrs;
for (HloInstruction* opnd : consumer->operands()) {
HloInstruction* predecessor = opnd->LatestNonGteAncestor();
if (!predecessor->IsCustomFusion() &&
IsInputFusibleReduction(*predecessor) &&
IsConsumerTheOnlyNonRootUser(*predecessor, *consumer)) {
if (fusion_instr_set.insert(predecessor).second) {
fusion_instrs.push_back(predecessor);
}
}
}
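// Sort candidates by input shape, then by instruction count, so that
// shape-compatible fusions end up adjacent for the greedy merging below.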
std::sort(fusion_instrs.begin(), fusion_instrs.end(),
[&](const HloInstruction* a, const HloInstruction* b) {
Shape shape_a = GetInputShapeForMultiOutputFusion(*a);
Shape shape_b = GetInputShapeForMultiOutputFusion(*b);
if (!ShapeUtil::EqualIgnoringElementType(shape_a, shape_b)) {
return CompareShapeDimsFromLeftToRight(shape_a, shape_b);
}
return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b);
});
return fusion_instrs;
}
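// Greedily merges shape-compatible sibling input fusions into multi-output
// fusions, first wrapping any bare fusible reductions in fusion instructions.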
absl::StatusOr<bool> HorizontalInputFusionImpl::Run() {
bool changed = false;
XLA_VLOG_LINES(3, computation_->ToString());
std::vector<HloInstruction*> def_to_use_order =
computation_->MakeInstructionPostOrder();
for (HloInstruction* consumer : def_to_use_order) {
auto candidates = FindAndSortFusionCandidates(consumer);
if (candidates.size() <= 1) {
continue;
}
for (size_t j = 0; j < candidates.size(); ++j) {
if (candidates[j]->opcode() != HloOpcode::kFusion) {
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion_instr,
MakeFusionInstruction(candidates[j],
HloInstruction::FusionKind::kInput));
candidates[j] = fusion_instr;
changed = true;
}
}
size_t fusion_anchor_id = 0;
for (size_t j = 1; j < candidates.size(); ++j) {
HloInstruction* fusion_anchor = candidates[fusion_anchor_id];
HloInstruction* fused = candidates[j];
if (ShapesCompatibleForMultiOutputFusion(*fusion_anchor, *fused) &&
FusionFitsInBudget(*fusion_anchor, *fused, device_info_)) {
VLOG(3) << "Fuse " << fused->ToString() << " into "
<< fusion_anchor->ToString();
fusion_anchor->MergeFusionInstructionIntoMultiOutput(fused);
changed = true;
} else {
VLOG(3) << j - fusion_anchor_id - 1 << " instructions are fused.";
fusion_anchor_id = j;
}
}
}
return changed;
}
}
absl::StatusOr<bool> HorizontalInputFusion::RunOnComputation(
HloComputation* computation) {
HorizontalInputFusionImpl horizontal_fusion_impl(computation, device_info_);
return horizontal_fusion_impl.Run();
}
absl::StatusOr<bool> HorizontalInputFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
VLOG(2) << "Run horizontal input fusion.";
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
changed |= comp_changed;
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/horizontal_input_fusion.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class HorizontalInputFusionTest : public GpuCodegenTest {
public:
se::DeviceDescription device_description_{
TestGpuDeviceInfo::RTXA6000DeviceInfo()};
HorizontalInputFusion horizontal_input_fusion_{device_description_};
};
TEST_F(HorizontalInputFusionTest, BasicTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
}
fused_computation.2 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
fusion.1 = f16[] fusion(arg.1), kind=kInput, calls=fused_computation.1
fusion.2 = f16[] fusion(arg.2), kind=kInput, calls=fused_computation.2
ROOT tuple.1 = (f16[], f16[]) tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))),
(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
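// Builds 130 independent multiply+reduce input fusions and expects the pass
// to group them horizontally; the IR checks look for the grouped reductions.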
TEST_F(HorizontalInputFusionTest, ManyInputFusions) {
auto module = CreateNewVerifiedModule();
HloComputation* reduce_computation;
{
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
reduce_computation =
module->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> var_outs;
auto input_shape = ShapeUtil::MakeShape(F32, {1024, 1024});
auto output_shape = ShapeUtil::MakeShape(F32, {1024});
for (int64_t i = 0; i < 130; ++i) {
HloInstruction* param_var_in = builder.AddInstruction(
HloInstruction::CreateParameter(i * 2 + 0, input_shape, "var.in"));
HloInstruction* param_alpha =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 2 + 1, ShapeUtil::MakeShape(F32, {}), "alpha"));
auto alpha_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(input_shape, param_alpha, {}));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
input_shape, HloOpcode::kMultiply, param_var_in, alpha_broadcasted));
HloInstruction* const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
output_shape, mul, const0, {1}, reduce_computation));
var_outs.push_back(reduce);
}
builder.AddInstruction(HloInstruction::CreateTuple(var_outs));
module->AddEntryComputation(builder.Build());
if (GetDebugOptionsForTest().xla_gpu_mlir_emitter_level() < 4) {
CompileAndVerifyIr(module->Clone(), R"(CHECK: reduce-group-6)",
false);
} else {
CompileAndVerifyIr(module->Clone(), R"(CHECK: switch {{.*}} label {{.*}} [
CHECK-NEXT: label)",
false);
}
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5}));
}
TEST_F(HorizontalInputFusionTest, MultiOutputFusionTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MultiOutputFusionTest
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
add.0 = f16[1024] add(arg.1, arg.1)
ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0)
}
fused_computation.2 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
add.0 = f16[1024] add(arg.1, arg.1)
ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0)
}
fused_computation.3 {
arg.0 = f16[1024]{0} parameter(0)
arg.1 = f16[1024]{0} parameter(1)
add.0 = f16[1024] add(arg.0, arg.1)
mul.0 = f16[1024] multiply(arg.0, arg.1)
ROOT tuple.1 = (f16[1024], f16[1024]) tuple(add.0, mul.0)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
fusion.1 = (f16[],f16[1024]) fusion(arg.1), kind=kInput, calls=fused_computation.1
fusion.2 = (f16[],f16[1024]) fusion(arg.2), kind=kInput, calls=fused_computation.2
gte.3 = f16[] get-tuple-element(fusion.1), index=0
gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=1
gte.2 = f16[1024]{0} get-tuple-element(fusion.2), index=1
gte.6 = f16[] get-tuple-element(fusion.2), index=0
fusion.3 = (f16[1024],f16[1024]) fusion(gte.1, gte.2),
kind=kLoop, calls=fused_computation.3
gte.4 = f16[1024] get-tuple-element(fusion.3), index=0
gte.5 = f16[1024]{0} get-tuple-element(fusion.3), index=1
ROOT tuple.1 = (f16[], f16[1024], f16[1024]{0}, f16[])
tuple(gte.3, gte.4, gte.5, gte.6)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
}
TEST_F(HorizontalInputFusionTest, NonfusionInstrs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NonfusionInstrs
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
ENTRY entry_computation {
arg.0 = f16[1024]{0} parameter(0)
arg.1 = f16[1024]{0} parameter(1)
constant0 = f16[] constant(0)
reduce.0 = f16[] reduce(arg.0, constant0), dimensions={0}, to_apply=%add_f16
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
ROOT tuple.0 = (f16[], f16[]) tuple(reduce.0, reduce.1)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))),
(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(HorizontalInputFusionTest, DoesNotFuseCustomFusions) {
auto module = ParseAndReturnVerifiedModule(R"(
max {
p0 = f16[] parameter(0)
p1 = f16[] parameter(1)
ROOT max = f16[] maximum(p0, p1)
}
triton_a {
p = f16[128,256] parameter(0)
c = f16[] constant(0)
ROOT n = f16[128] reduce(p, c), dimensions={1}, to_apply=max
}
triton_b {
p = f16[128,256] parameter(0)
c = f16[] constant(0)
ROOT n = f16[128] reduce(p, c), dimensions={1}, to_apply=max
}
ENTRY entry_computation {
p = f16[128,256] parameter(0)
fa = f16[128] fusion(p), kind=kCustom, calls=triton_a
fb = f16[128] fusion(p), kind=kCustom, calls=triton_b
ROOT tuple = (f16[128], f16[128]) tuple(fa, fb)
}
)")
.value();
EXPECT_FALSE(horizontal_input_fusion_.Run(module.get()).value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f33bd101-d6e4-43c2-8486-407ea91aac0e | cpp | tensorflow/tensorflow | sparse_fill_empty_rows_op | tensorflow/core/kernels/sparse_fill_empty_rows_op.cc | tensorflow/core/kernels/sparse_fill_empty_rows_op_test.cc | #define EIGEN_USE_THREADS
#include <algorithm>
#include <numeric>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/fill_empty_rows_functor.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
namespace {
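// Shared implementation for the synchronous (CPU) and asynchronous (GPU)
// kernels; a default no-op `done` callback covers the synchronous case.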
template <typename Device, typename T, typename Tindex>
void SparseFillEmptyRowsOpImpl(OpKernelContext* context,
AsyncOpKernel::DoneCallback done = nullptr) {
if (!done) {
done = [] {};
}
const int kIndicesInput = 0;
const int kValuesInput = 1;
const int kDenseShapeInput = 2;
const int kDefaultValueInput = 3;
const Tensor& indices_t = context->input(kIndicesInput);
const Tensor& values_t = context->input(kValuesInput);
const Tensor& dense_shape_t = context->input(kDenseShapeInput);
const Tensor& default_value_t = context->input(kDefaultValueInput);
OP_REQUIRES_ASYNC(
context, TensorShapeUtils::IsVector(dense_shape_t.shape()),
errors::InvalidArgument("dense_shape must be a vector, saw: ",
dense_shape_t.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()),
errors::InvalidArgument("indices must be a matrix, saw: ",
indices_t.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()),
errors::InvalidArgument("values must be a vector, saw: ",
values_t.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(
context, indices_t.dim_size(0) == values_t.dim_size(0),
errors::InvalidArgument("The length of `values` (", values_t.dim_size(0),
") must match the first dimension of `indices` (",
indices_t.dim_size(0), ")."),
done);
OP_REQUIRES_ASYNC(
context, indices_t.dim_size(1) == dense_shape_t.dim_size(0),
errors::InvalidArgument("The length of `dense_shape` (",
dense_shape_t.dim_size(0),
") must match the second dimension of `indices` ",
"(", indices_t.dim_size(1), ")."),
done);
OP_REQUIRES_ASYNC(
context, TensorShapeUtils::IsScalar(default_value_t.shape()),
errors::InvalidArgument("default_value must be a scalar, saw: ",
default_value_t.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0,
errors::InvalidArgument("Dense shape cannot be empty."),
done);
using FunctorType =
functor::FillEmptyRows<Device, T, Tindex, false>;
OP_REQUIRES_OK_ASYNC(context,
FunctorType()(context, default_value_t, indices_t,
values_t, dense_shape_t, done),
done);
}
}
template <typename Device, typename T, typename Tindex>
class SparseFillEmptyRowsOp : public OpKernel {
public:
explicit SparseFillEmptyRowsOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
SparseFillEmptyRowsOpImpl<Device, T, Tindex>(context);
}
};
#define REGISTER_KERNELS(D, T, Tindex) \
REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") \
.Device(DEVICE_##D) \
.HostMemory("dense_shape") \
.TypeConstraint<T>("T"), \
SparseFillEmptyRowsOp<D##Device, T, Tindex>)
#define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64)
TF_CALL_ALL_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#undef REGISTER_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename T, typename Tindex>
class SparseFillEmptyRowsGPUOp : public AsyncOpKernel {
public:
explicit SparseFillEmptyRowsGPUOp(OpKernelConstruction* context)
: AsyncOpKernel(context) {}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
SparseFillEmptyRowsOpImpl<GPUDevice, T, Tindex>(context, done);
}
};
#define REGISTER_KERNELS(T, Tindex) \
REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") \
.Device(DEVICE_GPU) \
.HostMemory("dense_shape") \
.TypeConstraint<T>("T"), \
SparseFillEmptyRowsGPUOp<T, Tindex>)
#define REGISTER_KERNELS_TINDEX(T) REGISTER_KERNELS(T, int64)
TF_CALL_POD_TYPES(REGISTER_KERNELS_TINDEX)
#undef REGISTER_KERNELS_TINDEX
#undef REGISTER_KERNELS
#endif
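// Gradient kernel: gathers grad_values through reverse_index_map into
// d_values; gradient for rows that were filled with the default value flows
// into d_default_value.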
template <typename Device, typename T, typename Tindex>
class SparseFillEmptyRowsGradOp : public OpKernel {
public:
explicit SparseFillEmptyRowsGradOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor* reverse_index_map_t;
const Tensor* grad_values_t;
OP_REQUIRES_OK(context,
context->input("reverse_index_map", &reverse_index_map_t));
OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t));
OP_REQUIRES(
context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()),
errors::InvalidArgument("reverse_index_map must be a vector, saw: ",
reverse_index_map_t->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()),
errors::InvalidArgument("grad_values must be a vector, saw: ",
grad_values_t->shape().DebugString()));
const auto reverse_index_map = reverse_index_map_t->vec<Tindex>();
const auto grad_values = grad_values_t->vec<T>();
const Tindex N = reverse_index_map_t->shape().dim_size(0);
Tensor* d_values_t;
OP_REQUIRES_OK(context, context->allocate_output(
"d_values", TensorShape({N}), &d_values_t));
auto d_values = d_values_t->vec<T>();
Tensor* d_default_value_t;
OP_REQUIRES_OK(context,
context->allocate_output("d_default_value", TensorShape({}),
&d_default_value_t));
auto d_default_value = d_default_value_t->scalar<T>();
OP_REQUIRES_OK(context, functor::FillEmptyRowsGrad<Device, T, Tindex>()(
context, reverse_index_map, grad_values,
d_values, d_default_value));
}
};
#define REGISTER_KERNELS(D, T, Tindex) \
REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRowsGrad") \
.Device(DEVICE_##D) \
.TypeConstraint<T>("T"), \
SparseFillEmptyRowsGradOp<D##Device, T, Tindex>)
#define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64)
TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T, int64)
TF_CALL_REAL_NUMBER_TYPES(REGISTER_GPU_KERNELS);
#undef REGISTER_GPU_KERNELS
#endif
#undef REGISTER_KERNELS
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class SparseFillEmptyRowsTest : public OpsTestBase {
protected:
void MakeOp(DataType index_type, DataType value_type) {
TF_ASSERT_OK(NodeDefBuilder("sparsefillemptyrows", "SparseFillEmptyRows")
.Input(FakeInput(index_type))
.Input(FakeInput(value_type))
.Input(FakeInput(index_type))
.Input(FakeInput(value_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
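// dense_shape is {5, 6} and the input touches rows 0, 2, and 3, so rows 1
// and 4 are empty and get filled with the default value 4.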
TEST_F(SparseFillEmptyRowsTest, SparseFillEmptyRows) {
MakeOp(DT_INT64, DT_FLOAT);
AddInputFromArray<int64>(TensorShape({4, 2}), {0, 1, 0, 3, 2, 0, 3, 1});
AddInputFromArray<float>(TensorShape({4}), {0, 3, 1, 2});
AddInputFromArray<int64>(TensorShape({2}), {5, 6});
AddInputFromArray<float>(TensorShape({}), {4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected0(allocator(), DT_INT64, {6, 2});
expected0.tensor<int64, 2>()(0, 0) = 0;
expected0.tensor<int64, 2>()(0, 1) = 1;
expected0.tensor<int64, 2>()(1, 0) = 0;
expected0.tensor<int64, 2>()(1, 1) = 3;
expected0.tensor<int64, 2>()(2, 0) = 1;
expected0.tensor<int64, 2>()(2, 1) = 0;
expected0.tensor<int64, 2>()(3, 0) = 2;
expected0.tensor<int64, 2>()(3, 1) = 0;
expected0.tensor<int64, 2>()(4, 0) = 3;
expected0.tensor<int64, 2>()(4, 1) = 1;
expected0.tensor<int64, 2>()(5, 0) = 4;
expected0.tensor<int64, 2>()(5, 1) = 0;
test::ExpectTensorEqual<int64>(expected0, *GetOutput(0));
Tensor expected1(allocator(), DT_FLOAT, {6});
test::FillValues<float>(&expected1, {0, 3, 4, 1, 2, 4});
test::ExpectTensorEqual<float>(expected1, *GetOutput(1));
Tensor expected2(allocator(), DT_BOOL, {5});
test::FillValues<bool>(&expected2, {false, true, false, false, true});
test::ExpectTensorEqual<bool>(expected2, *GetOutput(2));
Tensor expected3(allocator(), DT_INT64, {4});
test::FillValues<int64>(&expected3, {0, 1, 3, 4});
test::ExpectTensorEqual<int64>(expected3, *GetOutput(3));
}
TEST_F(SparseFillEmptyRowsTest, IndicesValuesUnmatch) {
MakeOp(DT_INT64, DT_FLOAT);
AddInputFromArray<int64>(TensorShape({4, 2}), {0, 1, 0, 3, 2, 0, 3, 1});
AddInputFromArray<float>(TensorShape({3}), {0, 3, 1});
AddInputFromArray<int64>(TensorShape({2}), {5, 6});
AddInputFromArray<float>(TensorShape({}), {4});
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"The length of `values` (3) must match the "
"first dimension of `indices` (4)."));
}
TEST_F(SparseFillEmptyRowsTest, IndicesDenseShapeUnmatch) {
MakeOp(DT_INT64, DT_FLOAT);
AddInputFromArray<int64>(TensorShape({4, 0}), {});
AddInputFromArray<float>(TensorShape({4}), {0, 3, 1, 2});
AddInputFromArray<int64>(TensorShape({2}), {5, 6});
AddInputFromArray<float>(TensorShape({}), {4});
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"The length of `dense_shape` (2) must match "
"the second dimension of `indices` (0)."));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_fill_empty_rows_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f0985b7a-a24b-4d9e-9677-12ff0a6d68d6 | cpp | google/tensorstore | verbose_flag | tensorstore/internal/log/verbose_flag.cc | tensorstore/internal/log/verbose_flag_test.cc | #include "tensorstore/internal/log/verbose_flag.h"
#include <stddef.h>
#include <atomic>
#include <cassert>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/env.h"
ABSL_FLAG(std::string, tensorstore_verbose_logging, {},
"comma-separated list of tensorstore verbose logging flags")
.OnUpdate([]() {
if (!absl::GetFlag(FLAGS_tensorstore_verbose_logging).empty()) {
tensorstore::internal_log::UpdateVerboseLogging(
absl::GetFlag(FLAGS_tensorstore_verbose_logging), true);
}
});
namespace tensorstore {
namespace internal_log {
namespace {
ABSL_CONST_INIT absl::Mutex g_mutex(absl::kConstInit);
ABSL_CONST_INIT VerboseFlag* g_list_head ABSL_GUARDED_BY(g_mutex) = nullptr;
struct LoggingLevelConfig {
int default_level = -1;
absl::flat_hash_map<std::string, int> levels;
};
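// Parses a comma-separated list of "name" or "name=level" entries: a bare
// name means level 0, levels are clamped to [-1, 1000], and the special name
// "all" sets the default level.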
void UpdateLoggingLevelConfig(LoggingLevelConfig& config,
std::string_view input) {
auto& levels = config.levels;
for (std::string_view flag : absl::StrSplit(input, ',', absl::SkipEmpty())) {
const size_t eq = flag.rfind('=');
if (eq == flag.npos) {
levels.insert_or_assign(std::string(flag), 0);
continue;
}
if (eq == 0) continue;
int level;
if (!absl::SimpleAtoi(flag.substr(eq + 1), &level)) continue;
if (level < -1) {
level = -1;
} else if (level > 1000) {
level = 1000;
}
levels.insert_or_assign(std::string(flag.substr(0, eq)), level);
}
config.default_level = -1;
if (auto it = levels.find("all"); it != levels.end()) {
config.default_level = it->second;
}
}
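// Finds the most specific configured level for a dotted flag name: "a.b.c"
// falls back to "a.b", then "a", then the default level.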
int GetLevelForVerboseFlag(const LoggingLevelConfig& config,
std::string_view name) {
while (!name.empty()) {
auto it = config.levels.find(name);
if (it != config.levels.end()) {
return it->second;
}
auto pos = name.rfind('.');
if (pos == name.npos) {
break;
}
name = name.substr(0, pos);
}
return config.default_level;
}
LoggingLevelConfig& GetLoggingLevelConfig()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(g_mutex) {
static absl::NoDestructor<LoggingLevelConfig> flags{[] {
LoggingLevelConfig config;
if (auto env = internal::GetEnv("TENSORSTORE_VERBOSE_LOGGING"); env) {
UpdateLoggingLevelConfig(config, *env);
}
return config;
}()};
return *flags;
}
}
void UpdateVerboseLogging(std::string_view input, bool overwrite)
ABSL_LOCKS_EXCLUDED(g_mutex) {
ABSL_LOG(INFO) << "--tensorstore_verbose_logging=" << input;
LoggingLevelConfig config;
UpdateLoggingLevelConfig(config, input);
absl::MutexLock lock(&g_mutex);
VerboseFlag* slist = g_list_head;
LoggingLevelConfig& global_config = GetLoggingLevelConfig();
std::swap(global_config.levels, config.levels);
std::swap(global_config.default_level, config.default_level);
if (!overwrite) {
if (global_config.levels.count("all")) {
global_config.default_level = config.default_level;
}
global_config.levels.merge(config.levels);
}
int vlevel = GetLevelForVerboseFlag(global_config, "verbose_logging");
while (slist != nullptr) {
int value = GetLevelForVerboseFlag(global_config, slist->name_);
ABSL_LOG_IF(INFO, vlevel >= 1) << slist->name_ << "=" << value;
slist->value_.store(value, std::memory_order_seq_cst);
slist = slist->next_;
}
}
int VerboseFlag::RegisterVerboseFlag(VerboseFlag* flag) {
absl::MutexLock lock(&g_mutex);
int old_v = flag->value_.load(std::memory_order_relaxed);
if (old_v == kValueUninitialized) {
const auto& config = GetLoggingLevelConfig();
old_v = GetLevelForVerboseFlag(config, flag->name_);
flag->value_.store(old_v, std::memory_order_relaxed);
flag->next_ = std::exchange(g_list_head, flag);
}
return old_v;
}
bool VerboseFlag::VerboseFlagSlowPath(VerboseFlag* flag, int old_v, int level) {
if (ABSL_PREDICT_TRUE(old_v != kValueUninitialized)) {
return old_v >= level;
}
old_v = RegisterVerboseFlag(flag);
return ABSL_PREDICT_FALSE(old_v >= level);
}
static_assert(std::is_trivially_destructible<VerboseFlag>::value,
"VerboseFlag must be trivially destructible");
}
} | #include "tensorstore/internal/log/verbose_flag.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/attributes.h"
using ::tensorstore::internal_log::UpdateVerboseLogging;
using ::tensorstore::internal_log::VerboseFlag;
#define TENSORSTORE_VERBOSE_FLAG(X) \
[]() -> ::tensorstore::internal_log::VerboseFlag& { \
ABSL_CONST_INIT static ::tensorstore::internal_log::VerboseFlag flag(X); \
return flag; \
}()
namespace {
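// "a=2" enables levels 0..2 for flag "a" and, via prefix matching, "a.b".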
TEST(VerboseFlag, Basic) {
UpdateVerboseLogging("a=2", true);
ABSL_CONST_INIT static VerboseFlag a("a");
ABSL_CONST_INIT static VerboseFlag ab("a.b");
auto& b = TENSORSTORE_VERBOSE_FLAG("b");
EXPECT_THAT((bool)a, true);
EXPECT_THAT(a.Level(0), true);
EXPECT_THAT(a.Level(1), true);
EXPECT_THAT(a.Level(2), true);
EXPECT_THAT(a.Level(3), false);
EXPECT_THAT(ab.Level(3), false);
EXPECT_THAT(ab.Level(2), true);
EXPECT_THAT(ab.Level(1), true);
EXPECT_THAT(ab.Level(0), true);
EXPECT_THAT((bool)ab, true);
EXPECT_THAT((bool)b, false);
EXPECT_THAT(b.Level(0), false);
UpdateVerboseLogging("b,a=-1,a.b=1", false);
EXPECT_THAT((bool)a, false);
EXPECT_THAT(a.Level(0), false);
EXPECT_THAT(a.Level(1), false);
EXPECT_THAT((bool)ab, true);
EXPECT_THAT(ab.Level(0), true);
EXPECT_THAT(ab.Level(1), true);
EXPECT_THAT(ab.Level(2), false);
EXPECT_THAT((bool)b, true);
EXPECT_THAT(b.Level(0), true);
EXPECT_THAT(b.Level(1), false);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/log/verbose_flag.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/log/verbose_flag_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6857c23b-75e7-41e2-917d-67f9283e7e56 | cpp | google/quiche | http_decoder | quiche/quic/core/http/http_decoder.cc | quiche/quic/core/http/http_decoder_test.cc | #include "quiche/quic/core/http/http_decoder.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/quic/core/http/http_frames.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
constexpr QuicByteCount kPayloadLengthLimit = 1024 * 1024;
}
HttpDecoder::HttpDecoder(Visitor* visitor)
: visitor_(visitor),
allow_web_transport_stream_(false),
state_(STATE_READING_FRAME_TYPE),
current_frame_type_(0),
current_length_field_length_(0),
remaining_length_field_length_(0),
current_frame_length_(0),
remaining_frame_length_(0),
current_type_field_length_(0),
remaining_type_field_length_(0),
error_(QUIC_NO_ERROR),
error_detail_(""),
enable_origin_frame_(GetQuicReloadableFlag(enable_h3_origin_frame)) {
QUICHE_DCHECK(visitor_);
}
HttpDecoder::~HttpDecoder() {}
bool HttpDecoder::DecodeSettings(const char* data, QuicByteCount len,
SettingsFrame* frame) {
QuicDataReader reader(data, len);
uint64_t frame_type;
if (!reader.ReadVarInt62(&frame_type)) {
QUIC_DLOG(ERROR) << "Unable to read frame type.";
return false;
}
if (frame_type != static_cast<uint64_t>(HttpFrameType::SETTINGS)) {
QUIC_DLOG(ERROR) << "Invalid frame type " << frame_type;
return false;
}
absl::string_view frame_contents;
if (!reader.ReadStringPieceVarInt62(&frame_contents)) {
QUIC_DLOG(ERROR) << "Failed to read SETTINGS frame contents";
return false;
}
QuicDataReader frame_reader(frame_contents);
while (!frame_reader.IsDoneReading()) {
uint64_t id;
if (!frame_reader.ReadVarInt62(&id)) {
QUIC_DLOG(ERROR) << "Unable to read setting identifier.";
return false;
}
uint64_t content;
if (!frame_reader.ReadVarInt62(&content)) {
QUIC_DLOG(ERROR) << "Unable to read setting value.";
return false;
}
auto result = frame->values.insert({id, content});
if (!result.second) {
QUIC_DLOG(ERROR) << "Duplicate setting identifier.";
return false;
}
}
return true;
}
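// Drives the frame-parsing state machine over `data` and returns the number
// of bytes consumed. Processing can stop early if a visitor callback returns
// false or an indefinite-length WEBTRANSPORT_STREAM frame is encountered.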
QuicByteCount HttpDecoder::ProcessInput(const char* data, QuicByteCount len) {
QUICHE_DCHECK_EQ(QUIC_NO_ERROR, error_);
QUICHE_DCHECK_NE(STATE_ERROR, state_);
QuicDataReader reader(data, len);
bool continue_processing = true;
while (continue_processing && (reader.BytesRemaining() != 0 ||
state_ == STATE_BUFFER_OR_PARSE_PAYLOAD ||
state_ == STATE_FINISH_PARSING)) {
QUICHE_DCHECK_EQ(QUIC_NO_ERROR, error_);
QUICHE_DCHECK_NE(STATE_ERROR, state_);
switch (state_) {
case STATE_READING_FRAME_TYPE:
continue_processing = ReadFrameType(reader);
break;
case STATE_READING_FRAME_LENGTH:
continue_processing = ReadFrameLength(reader);
break;
case STATE_BUFFER_OR_PARSE_PAYLOAD:
continue_processing = BufferOrParsePayload(reader);
break;
case STATE_READING_FRAME_PAYLOAD:
continue_processing = ReadFramePayload(reader);
break;
case STATE_FINISH_PARSING:
continue_processing = FinishParsing();
break;
case STATE_PARSING_NO_LONGER_POSSIBLE:
continue_processing = false;
QUIC_BUG(HttpDecoder PARSING_NO_LONGER_POSSIBLE)
<< "HttpDecoder called after an indefinite-length frame has been "
"received";
RaiseError(QUIC_INTERNAL_ERROR,
"HttpDecoder called after an indefinite-length frame has "
"been received");
break;
case STATE_ERROR:
break;
default:
QUIC_BUG(quic_bug_10411_1) << "Invalid state: " << state_;
}
}
return len - reader.BytesRemaining();
}
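// Reads the varint frame type, buffering partial bytes across calls. HTTP/2
// frame types, CANCEL_PUSH, and PUSH_PROMISE are errors in HTTP/3.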
bool HttpDecoder::ReadFrameType(QuicDataReader& reader) {
QUICHE_DCHECK_NE(0u, reader.BytesRemaining());
if (current_type_field_length_ == 0) {
current_type_field_length_ = reader.PeekVarInt62Length();
QUICHE_DCHECK_NE(0u, current_type_field_length_);
if (current_type_field_length_ > reader.BytesRemaining()) {
remaining_type_field_length_ = current_type_field_length_;
BufferFrameType(reader);
return true;
}
bool success = reader.ReadVarInt62(¤t_frame_type_);
QUICHE_DCHECK(success);
} else {
BufferFrameType(reader);
if (remaining_type_field_length_ != 0) {
return true;
}
QuicDataReader type_reader(type_buffer_.data(), current_type_field_length_);
bool success = type_reader.ReadVarInt62(¤t_frame_type_);
QUICHE_DCHECK(success);
}
if (decoded_frame_types_.size() < 10) {
decoded_frame_types_.push_back(current_frame_type_);
}
if (current_frame_type_ ==
static_cast<uint64_t>(http2::Http2FrameType::PRIORITY) ||
current_frame_type_ ==
static_cast<uint64_t>(http2::Http2FrameType::PING) ||
current_frame_type_ ==
static_cast<uint64_t>(http2::Http2FrameType::WINDOW_UPDATE) ||
current_frame_type_ ==
static_cast<uint64_t>(http2::Http2FrameType::CONTINUATION)) {
RaiseError(QUIC_HTTP_RECEIVE_SPDY_FRAME,
absl::StrCat("HTTP/2 frame received in a HTTP/3 connection: ",
current_frame_type_));
return false;
}
if (current_frame_type_ ==
static_cast<uint64_t>(HttpFrameType::CANCEL_PUSH)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "CANCEL_PUSH frame received.");
return false;
}
if (current_frame_type_ ==
static_cast<uint64_t>(HttpFrameType::PUSH_PROMISE)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "PUSH_PROMISE frame received.");
return false;
}
state_ = STATE_READING_FRAME_LENGTH;
return true;
}
bool HttpDecoder::ReadFrameLength(QuicDataReader& reader) {
QUICHE_DCHECK_NE(0u, reader.BytesRemaining());
if (current_length_field_length_ == 0) {
current_length_field_length_ = reader.PeekVarInt62Length();
QUICHE_DCHECK_NE(0u, current_length_field_length_);
if (current_length_field_length_ > reader.BytesRemaining()) {
remaining_length_field_length_ = current_length_field_length_;
BufferFrameLength(reader);
return true;
}
bool success = reader.ReadVarInt62(¤t_frame_length_);
QUICHE_DCHECK(success);
} else {
BufferFrameLength(reader);
if (remaining_length_field_length_ != 0) {
return true;
}
QuicDataReader length_reader(length_buffer_.data(),
current_length_field_length_);
bool success = length_reader.ReadVarInt62(¤t_frame_length_);
QUICHE_DCHECK(success);
}
if (allow_web_transport_stream_ &&
current_frame_type_ ==
static_cast<uint64_t>(HttpFrameType::WEBTRANSPORT_STREAM)) {
visitor_->OnWebTransportStreamFrameType(
current_length_field_length_ + current_type_field_length_,
current_frame_length_);
state_ = STATE_PARSING_NO_LONGER_POSSIBLE;
return false;
}
if (IsFrameBuffered() &&
current_frame_length_ > MaxFrameLength(current_frame_type_)) {
RaiseError(QUIC_HTTP_FRAME_TOO_LARGE, "Frame is too large.");
return false;
}
bool continue_processing = true;
const QuicByteCount header_length =
current_length_field_length_ + current_type_field_length_;
switch (current_frame_type_) {
case static_cast<uint64_t>(HttpFrameType::DATA):
continue_processing =
visitor_->OnDataFrameStart(header_length, current_frame_length_);
break;
case static_cast<uint64_t>(HttpFrameType::HEADERS):
continue_processing =
visitor_->OnHeadersFrameStart(header_length, current_frame_length_);
break;
case static_cast<uint64_t>(HttpFrameType::CANCEL_PUSH):
QUICHE_NOTREACHED();
break;
case static_cast<uint64_t>(HttpFrameType::SETTINGS):
continue_processing = visitor_->OnSettingsFrameStart(header_length);
break;
case static_cast<uint64_t>(HttpFrameType::PUSH_PROMISE):
QUICHE_NOTREACHED();
break;
case static_cast<uint64_t>(HttpFrameType::GOAWAY):
break;
case static_cast<uint64_t>(HttpFrameType::MAX_PUSH_ID):
break;
case static_cast<uint64_t>(HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM):
continue_processing = visitor_->OnPriorityUpdateFrameStart(header_length);
break;
case static_cast<uint64_t>(HttpFrameType::ACCEPT_CH):
continue_processing = visitor_->OnAcceptChFrameStart(header_length);
break;
case static_cast<uint64_t>(HttpFrameType::METADATA):
continue_processing =
visitor_->OnMetadataFrameStart(header_length, current_frame_length_);
break;
default:
if (enable_origin_frame_ &&
current_frame_type_ == static_cast<uint64_t>(HttpFrameType::ORIGIN)) {
QUIC_CODE_COUNT_N(enable_h3_origin_frame, 1, 2);
continue_processing = visitor_->OnOriginFrameStart(header_length);
break;
}
continue_processing = visitor_->OnUnknownFrameStart(
current_frame_type_, header_length, current_frame_length_);
break;
}
remaining_frame_length_ = current_frame_length_;
if (IsFrameBuffered()) {
state_ = STATE_BUFFER_OR_PARSE_PAYLOAD;
return continue_processing;
}
state_ = (remaining_frame_length_ == 0) ? STATE_FINISH_PARSING
: STATE_READING_FRAME_PAYLOAD;
return continue_processing;
}
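// Frame types whose payloads are buffered in full before parsing: SETTINGS,
// GOAWAY, MAX_PUSH_ID, PRIORITY_UPDATE, ACCEPT_CH, and (when the reloadable
// flag is on) ORIGIN.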
bool HttpDecoder::IsFrameBuffered() {
switch (current_frame_type_) {
case static_cast<uint64_t>(HttpFrameType::SETTINGS):
return true;
case static_cast<uint64_t>(HttpFrameType::GOAWAY):
return true;
case static_cast<uint64_t>(HttpFrameType::MAX_PUSH_ID):
return true;
case static_cast<uint64_t>(HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM):
return true;
case static_cast<uint64_t>(HttpFrameType::ORIGIN):
if (enable_origin_frame_) {
QUIC_CODE_COUNT_N(enable_h3_origin_frame, 2, 2);
return true;
}
return false;
case static_cast<uint64_t>(HttpFrameType::ACCEPT_CH):
return true;
}
return false;
}
bool HttpDecoder::ReadFramePayload(QuicDataReader& reader) {
QUICHE_DCHECK(!IsFrameBuffered());
QUICHE_DCHECK_NE(0u, reader.BytesRemaining());
QUICHE_DCHECK_NE(0u, remaining_frame_length_);
bool continue_processing = true;
switch (current_frame_type_) {
case static_cast<uint64_t>(HttpFrameType::DATA): {
QuicByteCount bytes_to_read = std::min<QuicByteCount>(
remaining_frame_length_, reader.BytesRemaining());
absl::string_view payload;
bool success = reader.ReadStringPiece(&payload, bytes_to_read);
QUICHE_DCHECK(success);
QUICHE_DCHECK(!payload.empty());
continue_processing = visitor_->OnDataFramePayload(payload);
remaining_frame_length_ -= payload.length();
break;
}
case static_cast<uint64_t>(HttpFrameType::HEADERS): {
QuicByteCount bytes_to_read = std::min<QuicByteCount>(
remaining_frame_length_, reader.BytesRemaining());
absl::string_view payload;
bool success = reader.ReadStringPiece(&payload, bytes_to_read);
QUICHE_DCHECK(success);
QUICHE_DCHECK(!payload.empty());
continue_processing = visitor_->OnHeadersFramePayload(payload);
remaining_frame_length_ -= payload.length();
break;
}
case static_cast<uint64_t>(HttpFrameType::CANCEL_PUSH): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::SETTINGS): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::PUSH_PROMISE): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::GOAWAY): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::MAX_PUSH_ID): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::ACCEPT_CH): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::METADATA): {
QuicByteCount bytes_to_read = std::min<QuicByteCount>(
remaining_frame_length_, reader.BytesRemaining());
absl::string_view payload;
bool success = reader.ReadStringPiece(&payload, bytes_to_read);
QUICHE_DCHECK(success);
QUICHE_DCHECK(!payload.empty());
continue_processing = visitor_->OnMetadataFramePayload(payload);
remaining_frame_length_ -= payload.length();
break;
}
default: {
if (enable_origin_frame_ &&
current_frame_type_ == static_cast<uint64_t>(HttpFrameType::ORIGIN)) {
QUICHE_NOTREACHED();
break;
}
continue_processing = HandleUnknownFramePayload(reader);
break;
}
}
if (remaining_frame_length_ == 0) {
state_ = STATE_FINISH_PARSING;
}
return continue_processing;
}
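// Called once the payload of a non-buffered frame has been fully consumed;
// fires the matching *FrameEnd visitor callback and resets for the next frame.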
bool HttpDecoder::FinishParsing() {
QUICHE_DCHECK(!IsFrameBuffered());
QUICHE_DCHECK_EQ(0u, remaining_frame_length_);
bool continue_processing = true;
switch (current_frame_type_) {
case static_cast<uint64_t>(HttpFrameType::DATA): {
continue_processing = visitor_->OnDataFrameEnd();
break;
}
case static_cast<uint64_t>(HttpFrameType::HEADERS): {
continue_processing = visitor_->OnHeadersFrameEnd();
break;
}
case static_cast<uint64_t>(HttpFrameType::CANCEL_PUSH): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::SETTINGS): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::PUSH_PROMISE): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::GOAWAY): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::MAX_PUSH_ID): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::ACCEPT_CH): {
QUICHE_NOTREACHED();
break;
}
case static_cast<uint64_t>(HttpFrameType::METADATA): {
continue_processing = visitor_->OnMetadataFrameEnd();
break;
}
default:
if (enable_origin_frame_ &&
current_frame_type_ == static_cast<uint64_t>(HttpFrameType::ORIGIN)) {
QUICHE_NOTREACHED();
break;
}
continue_processing = visitor_->OnUnknownFrameEnd();
}
ResetForNextFrame();
return continue_processing;
}
void HttpDecoder::ResetForNextFrame() {
current_length_field_length_ = 0;
current_type_field_length_ = 0;
state_ = STATE_READING_FRAME_TYPE;
}
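// Forwards as much of an unknown frame's payload as is available in |reader|
// to the visitor via OnUnknownFramePayload().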
bool HttpDecoder::HandleUnknownFramePayload(QuicDataReader& reader) {
QuicByteCount bytes_to_read =
std::min<QuicByteCount>(remaining_frame_length_, reader.BytesRemaining());
absl::string_view payload;
bool success = reader.ReadStringPiece(&payload, bytes_to_read);
QUICHE_DCHECK(success);
QUICHE_DCHECK(!payload.empty());
remaining_frame_length_ -= payload.length();
return visitor_->OnUnknownFramePayload(payload);
}
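// For buffered frame types: if the entire payload is already available in
// |reader|, parses it in place without copying; otherwise accumulates bytes
// in |buffer_| and parses once the payload is complete.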
bool HttpDecoder::BufferOrParsePayload(QuicDataReader& reader) {
QUICHE_DCHECK(IsFrameBuffered());
QUICHE_DCHECK_EQ(current_frame_length_,
buffer_.size() + remaining_frame_length_);
if (buffer_.empty() && reader.BytesRemaining() >= current_frame_length_) {
remaining_frame_length_ = 0;
QuicDataReader current_payload_reader(reader.PeekRemainingPayload().data(),
current_frame_length_);
bool continue_processing = ParseEntirePayload(current_payload_reader);
reader.Seek(current_frame_length_);
ResetForNextFrame();
return continue_processing;
}
QuicByteCount bytes_to_read =
std::min<QuicByteCount>(remaining_frame_length_, reader.BytesRemaining());
absl::StrAppend(&buffer_, reader.PeekRemainingPayload().substr(
0, bytes_to_read));
reader.Seek(bytes_to_read);
remaining_frame_length_ -= bytes_to_read;
QUICHE_DCHECK_EQ(current_frame_length_,
buffer_.size() + remaining_frame_length_);
if (remaining_frame_length_ > 0) {
QUICHE_DCHECK(reader.IsDoneReading());
return false;
}
QuicDataReader buffer_reader(buffer_);
bool continue_processing = ParseEntirePayload(buffer_reader);
buffer_.clear();
ResetForNextFrame();
return continue_processing;
}
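// Parses the complete payload of a buffered frame and fires the corresponding
// visitor callback. |reader| must contain exactly the frame payload.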
bool HttpDecoder::ParseEntirePayload(QuicDataReader& reader) {
QUICHE_DCHECK(IsFrameBuffered());
QUICHE_DCHECK_EQ(current_frame_length_, reader.BytesRemaining());
QUICHE_DCHECK_EQ(0u, remaining_frame_length_);
switch (current_frame_type_) {
case static_cast<uint64_t>(HttpFrameType::CANCEL_PUSH): {
QUICHE_NOTREACHED();
return false;
}
case static_cast<uint64_t>(HttpFrameType::SETTINGS): {
SettingsFrame frame;
if (!ParseSettingsFrame(reader, frame)) {
return false;
}
return visitor_->OnSettingsFrame(frame);
}
case static_cast<uint64_t>(HttpFrameType::GOAWAY): {
GoAwayFrame frame;
if (!reader.ReadVarInt62(&frame.id)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Unable to read GOAWAY ID.");
return false;
}
if (!reader.IsDoneReading()) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Superfluous data in GOAWAY frame.");
return false;
}
return visitor_->OnGoAwayFrame(frame);
}
case static_cast<uint64_t>(HttpFrameType::MAX_PUSH_ID): {
uint64_t unused;
if (!reader.ReadVarInt62(&unused)) {
RaiseError(QUIC_HTTP_FRAME_ERROR,
"Unable to read MAX_PUSH_ID push_id.");
return false;
}
if (!reader.IsDoneReading()) {
RaiseError(QUIC_HTTP_FRAME_ERROR,
"Superfluous data in MAX_PUSH_ID frame.");
return false;
}
return visitor_->OnMaxPushIdFrame();
}
case static_cast<uint64_t>(HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM): {
PriorityUpdateFrame frame;
if (!ParsePriorityUpdateFrame(reader, frame)) {
return false;
}
return visitor_->OnPriorityUpdateFrame(frame);
}
case static_cast<uint64_t>(HttpFrameType::ORIGIN): {
OriginFrame frame;
if (!ParseOriginFrame(reader, frame)) {
return false;
}
return visitor_->OnOriginFrame(frame);
}
case static_cast<uint64_t>(HttpFrameType::ACCEPT_CH): {
AcceptChFrame frame;
if (!ParseAcceptChFrame(reader, frame)) {
return false;
}
return visitor_->OnAcceptChFrame(frame);
}
default:
QUICHE_NOTREACHED();
return false;
}
}
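// Copies available bytes of the variable-length frame length field into
// |length_buffer_| until the field is complete.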
void HttpDecoder::BufferFrameLength(QuicDataReader& reader) {
QuicByteCount bytes_to_read = std::min<QuicByteCount>(
remaining_length_field_length_, reader.BytesRemaining());
bool success =
reader.ReadBytes(length_buffer_.data() + current_length_field_length_ -
remaining_length_field_length_,
bytes_to_read);
QUICHE_DCHECK(success);
remaining_length_field_length_ -= bytes_to_read;
}
void HttpDecoder::BufferFrameType(QuicDataReader& reader) {
QuicByteCount bytes_to_read = std::min<QuicByteCount>(
remaining_type_field_length_, reader.BytesRemaining());
bool success =
reader.ReadBytes(type_buffer_.data() + current_type_field_length_ -
remaining_type_field_length_,
bytes_to_read);
QUICHE_DCHECK(success);
remaining_type_field_length_ -= bytes_to_read;
}
void HttpDecoder::RaiseError(QuicErrorCode error, std::string error_detail) {
state_ = STATE_ERROR;
error_ = error;
error_detail_ = std::move(error_detail);
visitor_->OnError(this);
}
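// Parses a SETTINGS payload as a sequence of (identifier, value) varint
// pairs, rejecting duplicate identifiers.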
bool HttpDecoder::ParseSettingsFrame(QuicDataReader& reader,
SettingsFrame& frame) {
while (!reader.IsDoneReading()) {
uint64_t id;
if (!reader.ReadVarInt62(&id)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Unable to read setting identifier.");
return false;
}
uint64_t content;
if (!reader.ReadVarInt62(&content)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Unable to read setting value.");
return false;
}
auto result = frame.values.insert({id, content});
if (!result.second) {
RaiseError(QUIC_HTTP_DUPLICATE_SETTING_IDENTIFIER,
"Duplicate setting identifier.");
return false;
}
}
return true;
}
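// Parses a PRIORITY_UPDATE payload: a varint prioritized element ID followed
// by the Priority Field Value, which spans the rest of the payload.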
bool HttpDecoder::ParsePriorityUpdateFrame(QuicDataReader& reader,
PriorityUpdateFrame& frame) {
if (!reader.ReadVarInt62(&frame.prioritized_element_id)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Unable to read prioritized element id.");
return false;
}
absl::string_view priority_field_value = reader.ReadRemainingPayload();
frame.priority_field_value =
std::string(priority_field_value.data(), priority_field_value.size());
return true;
}
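// Parses an ORIGIN payload as a sequence of origins, each prefixed with a
// 16-bit length.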
bool HttpDecoder::ParseOriginFrame(QuicDataReader& reader, OriginFrame& frame) {
QUICHE_DCHECK(enable_origin_frame_);
while (!reader.IsDoneReading()) {
absl::string_view origin;
if (!reader.ReadStringPiece16(&origin)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Unable to read ORIGIN origin.");
return false;
}
frame.origins.push_back(std::string(origin));
}
return true;
}
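// Parses an ACCEPT_CH payload as a sequence of (origin, value) pairs, each
// string prefixed with a varint length.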
bool HttpDecoder::ParseAcceptChFrame(QuicDataReader& reader,
AcceptChFrame& frame) {
absl::string_view origin;
absl::string_view value;
while (!reader.IsDoneReading()) {
if (!reader.ReadStringPieceVarInt62(&origin)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Unable to read ACCEPT_CH origin.");
return false;
}
if (!reader.ReadStringPieceVarInt62(&value)) {
RaiseError(QUIC_HTTP_FRAME_ERROR, "Unable to read ACCEPT_CH value.");
return false;
}
frame.entries.push_back({std::string(origin.data(), origin.size()),
std::string(value.data(), value.size())});
}
return true;
}
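// Maximum payload length accepted for each buffered frame type; callers (not
// shown in this excerpt) appear to use this to reject over-long frames with
// QUIC_HTTP_FRAME_TOO_LARGE, per the "Frame is too large." tests below.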
QuicByteCount HttpDecoder::MaxFrameLength(uint64_t frame_type) {
QUICHE_DCHECK(IsFrameBuffered());
switch (frame_type) {
case static_cast<uint64_t>(HttpFrameType::SETTINGS):
return kPayloadLengthLimit;
case static_cast<uint64_t>(HttpFrameType::GOAWAY):
return quiche::VARIABLE_LENGTH_INTEGER_LENGTH_8;
case static_cast<uint64_t>(HttpFrameType::MAX_PUSH_ID):
return quiche::VARIABLE_LENGTH_INTEGER_LENGTH_8;
case static_cast<uint64_t>(HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM):
return kPayloadLengthLimit;
case static_cast<uint64_t>(HttpFrameType::ACCEPT_CH):
return kPayloadLengthLimit;
case static_cast<uint64_t>(HttpFrameType::ORIGIN):
return kPayloadLengthLimit;
default:
QUICHE_NOTREACHED();
return 0;
}
}
std::string HttpDecoder::DebugString() const {
return absl::StrCat(
"HttpDecoder:", "\n state: ", state_, "\n error: ", error_,
"\n current_frame_type: ", current_frame_type_,
"\n current_length_field_length: ", current_length_field_length_,
"\n remaining_length_field_length: ", remaining_length_field_length_,
"\n current_frame_length: ", current_frame_length_,
"\n remaining_frame_length: ", remaining_frame_length_,
"\n current_type_field_length: ", current_type_field_length_,
"\n remaining_type_field_length: ", remaining_type_field_length_);
}
} | #include "quiche/quic/core/http/http_decoder.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/http_encoder.h"
#include "quiche/quic/core/http/http_frames.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::InSequence;
using ::testing::Return;
namespace quic {
namespace test {
class HttpDecoderPeer {
public:
static uint64_t current_frame_type(HttpDecoder* decoder) {
return decoder->current_frame_type_;
}
};
namespace {
class HttpDecoderTest : public QuicTest {
public:
HttpDecoderTest() : decoder_(&visitor_) {
ON_CALL(visitor_, OnMaxPushIdFrame()).WillByDefault(Return(true));
ON_CALL(visitor_, OnGoAwayFrame(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnSettingsFrameStart(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnSettingsFrame(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnDataFrameStart(_, _)).WillByDefault(Return(true));
ON_CALL(visitor_, OnDataFramePayload(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnDataFrameEnd()).WillByDefault(Return(true));
ON_CALL(visitor_, OnHeadersFrameStart(_, _)).WillByDefault(Return(true));
ON_CALL(visitor_, OnHeadersFramePayload(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnHeadersFrameEnd()).WillByDefault(Return(true));
ON_CALL(visitor_, OnPriorityUpdateFrameStart(_))
.WillByDefault(Return(true));
ON_CALL(visitor_, OnPriorityUpdateFrame(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnAcceptChFrameStart(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnAcceptChFrame(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnOriginFrameStart(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnOriginFrame(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnMetadataFrameStart(_, _)).WillByDefault(Return(true));
ON_CALL(visitor_, OnMetadataFramePayload(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnMetadataFrameEnd()).WillByDefault(Return(true));
ON_CALL(visitor_, OnUnknownFrameStart(_, _, _)).WillByDefault(Return(true));
ON_CALL(visitor_, OnUnknownFramePayload(_)).WillByDefault(Return(true));
ON_CALL(visitor_, OnUnknownFrameEnd()).WillByDefault(Return(true));
}
~HttpDecoderTest() override = default;
uint64_t current_frame_type() {
return HttpDecoderPeer::current_frame_type(&decoder_);
}
QuicByteCount ProcessInput(absl::string_view input) {
return decoder_.ProcessInput(input.data(), input.size());
}
void ProcessInputCharByChar(absl::string_view input) {
for (char c : input) {
EXPECT_EQ(1u, decoder_.ProcessInput(&c, 1));
}
}
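  // Appends garbage to |input| before processing, then verifies that the
  // decoder never consumes bytes beyond the end of |input| itself.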
QuicByteCount ProcessInputWithGarbageAppended(absl::string_view input) {
std::string input_with_garbage_appended = absl::StrCat(input, "blahblah");
QuicByteCount processed_bytes = ProcessInput(input_with_garbage_appended);
QUICHE_DCHECK_LE(processed_bytes, input_with_garbage_appended.size());
EXPECT_LE(processed_bytes, input.size());
return processed_bytes;
}
testing::StrictMock<MockHttpDecoderVisitor> visitor_;
HttpDecoder decoder_;
};
TEST_F(HttpDecoderTest, InitialState) {
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, UnknownFrame) {
std::unique_ptr<char[]> input;
const QuicByteCount payload_lengths[] = {0, 14, 100};
const uint64_t frame_types[] = {
      0x21, 0x40, 0x5f, 0x7e, 0x9d,  // reserved types (of the form 0x1f * N + 0x21)
      0x6f, 0x14                     // unknown, non-reserved types
};
for (auto payload_length : payload_lengths) {
std::string data(payload_length, 'a');
for (auto frame_type : frame_types) {
const QuicByteCount total_length =
QuicDataWriter::GetVarInt62Len(frame_type) +
QuicDataWriter::GetVarInt62Len(payload_length) + payload_length;
input = std::make_unique<char[]>(total_length);
QuicDataWriter writer(total_length, input.get());
writer.WriteVarInt62(frame_type);
writer.WriteVarInt62(payload_length);
const QuicByteCount header_length = writer.length();
if (payload_length > 0) {
writer.WriteStringPiece(data);
}
EXPECT_CALL(visitor_, OnUnknownFrameStart(frame_type, header_length,
payload_length));
if (payload_length > 0) {
EXPECT_CALL(visitor_, OnUnknownFramePayload(Eq(data)));
}
EXPECT_CALL(visitor_, OnUnknownFrameEnd());
EXPECT_EQ(total_length, decoder_.ProcessInput(input.get(), total_length));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
ASSERT_EQ("", decoder_.error_detail());
EXPECT_EQ(frame_type, current_frame_type());
}
}
}
TEST_F(HttpDecoderTest, CancelPush) {
InSequence s;
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("03"
"01"
"01",
&input));
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(1u, ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ("CANCEL_PUSH frame received.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, PushPromiseFrame) {
InSequence s;
std::string push_promise_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("05"
"08"
"1f",
&push_promise_bytes));
std::string input = absl::StrCat(push_promise_bytes,
"Headers");
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(1u, ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ("PUSH_PROMISE frame received.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, MaxPushId) {
InSequence s;
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("0D"
"01"
"01",
&input));
EXPECT_CALL(visitor_, OnMaxPushIdFrame()).WillOnce(Return(false));
EXPECT_EQ(input.size(), ProcessInputWithGarbageAppended(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnMaxPushIdFrame());
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnMaxPushIdFrame());
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, SettingsFrame) {
InSequence s;
std::string input;
ASSERT_TRUE(absl::HexStringToBytes(
"04"
"07"
"01"
"02"
"06"
"05"
"4100"
"04",
&input));
SettingsFrame frame;
frame.values[1] = 2;
frame.values[6] = 5;
frame.values[256] = 4;
absl::string_view remaining_input(input);
EXPECT_CALL(visitor_, OnSettingsFrameStart(2)).WillOnce(Return(false));
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(2u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnSettingsFrame(frame)).WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_THAT(decoder_.decoded_frame_types(), ElementsAre(4));
EXPECT_CALL(visitor_, OnSettingsFrameStart(2));
EXPECT_CALL(visitor_, OnSettingsFrame(frame));
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_THAT(decoder_.decoded_frame_types(), ElementsAre(4, 4));
EXPECT_CALL(visitor_, OnSettingsFrameStart(2));
EXPECT_CALL(visitor_, OnSettingsFrame(frame));
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_THAT(decoder_.decoded_frame_types(), ElementsAre(4, 4, 4));
}
TEST_F(HttpDecoderTest, CorruptSettingsFrame) {
const char* const kPayload =
"\x42\x11"
"\x80\x22\x33\x44"
"\x58\x39"
"\xf0\x22\x33\x44\x55\x66\x77\x88";
struct {
size_t payload_length;
const char* const error_message;
} kTestData[] = {
{1, "Unable to read setting identifier."},
{5, "Unable to read setting value."},
{7, "Unable to read setting identifier."},
{12, "Unable to read setting value."},
};
for (const auto& test_data : kTestData) {
std::string input;
input.push_back(4u);
input.push_back(test_data.payload_length);
const size_t header_length = input.size();
input.append(kPayload, test_data.payload_length);
HttpDecoder decoder(&visitor_);
EXPECT_CALL(visitor_, OnSettingsFrameStart(header_length));
EXPECT_CALL(visitor_, OnError(&decoder));
QuicByteCount processed_bytes =
decoder.ProcessInput(input.data(), input.size());
EXPECT_EQ(input.size(), processed_bytes);
EXPECT_THAT(decoder.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ(test_data.error_message, decoder.error_detail());
}
}
TEST_F(HttpDecoderTest, DuplicateSettingsIdentifier) {
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("04"
"04"
"01"
"01"
"01"
"02",
&input));
EXPECT_CALL(visitor_, OnSettingsFrameStart(2));
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(),
IsError(QUIC_HTTP_DUPLICATE_SETTING_IDENTIFIER));
EXPECT_EQ("Duplicate setting identifier.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, DataFrame) {
InSequence s;
std::string type_and_length_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("00"
"05",
&type_and_length_bytes));
std::string input = absl::StrCat(type_and_length_bytes,
"Data!");
EXPECT_CALL(visitor_, OnDataFrameStart(2, 5)).WillOnce(Return(false));
absl::string_view remaining_input(input);
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(2u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("Data!")))
.WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_CALL(visitor_, OnDataFrameEnd()).WillOnce(Return(false));
EXPECT_EQ(0u, ProcessInputWithGarbageAppended(""));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnDataFrameStart(2, 5));
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("Data!")));
EXPECT_CALL(visitor_, OnDataFrameEnd());
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnDataFrameStart(2, 5));
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("D")));
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("a")));
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("t")));
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("a")));
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("!")));
EXPECT_CALL(visitor_, OnDataFrameEnd());
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, FrameHeaderPartialDelivery) {
InSequence s;
std::string input(2048, 'x');
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
input.length(), quiche::SimpleBufferAllocator::Get());
EXPECT_EQ(1u, decoder_.ProcessInput(header.data(), 1));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnDataFrameStart(3, input.length()));
EXPECT_EQ(header.size() - 1,
decoder_.ProcessInput(header.data() + 1, header.size() - 1));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view(input)));
EXPECT_CALL(visitor_, OnDataFrameEnd());
EXPECT_EQ(2048u, decoder_.ProcessInput(input.data(), 2048));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, PartialDeliveryOfLargeFrameType) {
  // A reserved frame type (0x1f * N + 0x21) large enough to need a four-byte
  // varint encoding.
  const uint64_t frame_type = 0x1f * 0x222 + 0x21;
const QuicByteCount payload_length = 0;
const QuicByteCount header_length =
QuicDataWriter::GetVarInt62Len(frame_type) +
QuicDataWriter::GetVarInt62Len(payload_length);
auto input = std::make_unique<char[]>(header_length);
QuicDataWriter writer(header_length, input.get());
writer.WriteVarInt62(frame_type);
writer.WriteVarInt62(payload_length);
EXPECT_CALL(visitor_,
OnUnknownFrameStart(frame_type, header_length, payload_length));
EXPECT_CALL(visitor_, OnUnknownFrameEnd());
auto raw_input = input.get();
for (uint64_t i = 0; i < header_length; ++i) {
char c = raw_input[i];
EXPECT_EQ(1u, decoder_.ProcessInput(&c, 1));
}
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_EQ(frame_type, current_frame_type());
}
TEST_F(HttpDecoderTest, GoAway) {
InSequence s;
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("07"
"01"
"01",
&input));
EXPECT_CALL(visitor_, OnGoAwayFrame(GoAwayFrame({1})))
.WillOnce(Return(false));
EXPECT_EQ(input.size(), ProcessInputWithGarbageAppended(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnGoAwayFrame(GoAwayFrame({1})));
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnGoAwayFrame(GoAwayFrame({1})));
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, HeadersFrame) {
InSequence s;
std::string type_and_length_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("01"
"07",
&type_and_length_bytes));
std::string input = absl::StrCat(type_and_length_bytes,
"Headers");
EXPECT_CALL(visitor_, OnHeadersFrameStart(2, 7)).WillOnce(Return(false));
absl::string_view remaining_input(input);
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(2u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("Headers")))
.WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_CALL(visitor_, OnHeadersFrameEnd()).WillOnce(Return(false));
EXPECT_EQ(0u, ProcessInputWithGarbageAppended(""));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnHeadersFrameStart(2, 7));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("Headers")));
EXPECT_CALL(visitor_, OnHeadersFrameEnd());
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnHeadersFrameStart(2, 7));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("H")));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("e")));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("a")));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("d")));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("e")));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("r")));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("s")));
EXPECT_CALL(visitor_, OnHeadersFrameEnd());
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, MetadataFrame) {
InSequence s;
std::string type_and_length_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("404d"
"08",
&type_and_length_bytes));
std::string input = absl::StrCat(type_and_length_bytes,
"Metadata");
EXPECT_CALL(visitor_, OnMetadataFrameStart(3, 8)).WillOnce(Return(false));
absl::string_view remaining_input(input);
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(3u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("Metadata")))
.WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_CALL(visitor_, OnMetadataFrameEnd()).WillOnce(Return(false));
EXPECT_EQ(0u, ProcessInputWithGarbageAppended(""));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnMetadataFrameStart(3, 8));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("Metadata")));
EXPECT_CALL(visitor_, OnMetadataFrameEnd());
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnMetadataFrameStart(3, 8));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("M")));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("e")));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("t")));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("a")));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("d")));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("a")));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("t")));
EXPECT_CALL(visitor_, OnMetadataFramePayload(absl::string_view("a")));
EXPECT_CALL(visitor_, OnMetadataFrameEnd());
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, EmptyDataFrame) {
InSequence s;
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("00"
"00",
&input));
EXPECT_CALL(visitor_, OnDataFrameStart(2, 0)).WillOnce(Return(false));
EXPECT_EQ(input.size(), ProcessInputWithGarbageAppended(input));
EXPECT_CALL(visitor_, OnDataFrameEnd()).WillOnce(Return(false));
EXPECT_EQ(0u, ProcessInputWithGarbageAppended(""));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnDataFrameStart(2, 0));
EXPECT_CALL(visitor_, OnDataFrameEnd());
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnDataFrameStart(2, 0));
EXPECT_CALL(visitor_, OnDataFrameEnd());
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, EmptyHeadersFrame) {
InSequence s;
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("01"
"00",
&input));
EXPECT_CALL(visitor_, OnHeadersFrameStart(2, 0)).WillOnce(Return(false));
EXPECT_EQ(input.size(), ProcessInputWithGarbageAppended(input));
EXPECT_CALL(visitor_, OnHeadersFrameEnd()).WillOnce(Return(false));
EXPECT_EQ(0u, ProcessInputWithGarbageAppended(""));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnHeadersFrameStart(2, 0));
EXPECT_CALL(visitor_, OnHeadersFrameEnd());
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnHeadersFrameStart(2, 0));
EXPECT_CALL(visitor_, OnHeadersFrameEnd());
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, GoawayWithOverlyLargePayload) {
std::string input;
ASSERT_TRUE(absl::HexStringToBytes(
"07"
"10",
&input));
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(2u, ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_FRAME_TOO_LARGE));
EXPECT_EQ("Frame is too large.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, MaxPushIdWithOverlyLargePayload) {
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("0d"
"10",
&input));
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(2u, ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_FRAME_TOO_LARGE));
EXPECT_EQ("Frame is too large.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, FrameWithOverlyLargePayload) {
  constexpr size_t max_input_length =
      sizeof(uint64_t) +  // maximum frame type varint length
      sizeof(uint64_t) +  // maximum frame length varint length
      sizeof(uint8_t);    // one byte of payload
char input[max_input_length];
for (uint64_t frame_type = 0; frame_type < 1025; frame_type++) {
::testing::NiceMock<MockHttpDecoderVisitor> visitor;
HttpDecoder decoder(&visitor);
QuicDataWriter writer(max_input_length, input);
ASSERT_TRUE(writer.WriteVarInt62(frame_type));
ASSERT_TRUE(
writer.WriteVarInt62(quiche::kVarInt62MaxValue));
ASSERT_TRUE(writer.WriteUInt8(0x00));
EXPECT_NE(decoder.ProcessInput(input, writer.length()), 0u) << frame_type;
}
}
TEST_F(HttpDecoderTest, MalformedSettingsFrame) {
char input[30];
QuicDataWriter writer(30, input);
writer.WriteUInt8(0x04);
writer.WriteVarInt62(2048 * 1024);
writer.WriteStringPiece("Malformed payload");
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(5u, decoder_.ProcessInput(input, ABSL_ARRAYSIZE(input)));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_FRAME_TOO_LARGE));
EXPECT_EQ("Frame is too large.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, Http2Frame) {
std::string input;
ASSERT_TRUE(absl::HexStringToBytes(
"06"
"05"
"15",
&input));
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(1u, ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_RECEIVE_SPDY_FRAME));
EXPECT_EQ("HTTP/2 frame received in a HTTP/3 connection: 6",
decoder_.error_detail());
}
TEST_F(HttpDecoderTest, HeadersPausedThenData) {
InSequence s;
std::string headers_type_and_length_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("01"
"07",
&headers_type_and_length_bytes));
std::string headers = absl::StrCat(headers_type_and_length_bytes, "Headers");
std::string data_type_and_length_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("00"
"05",
&data_type_and_length_bytes));
std::string data = absl::StrCat(data_type_and_length_bytes, "Data!");
std::string input = absl::StrCat(headers, data);
EXPECT_CALL(visitor_, OnHeadersFrameStart(2, 7));
EXPECT_CALL(visitor_, OnHeadersFramePayload(absl::string_view("Headers")));
EXPECT_CALL(visitor_, OnHeadersFrameEnd()).WillOnce(Return(false));
absl::string_view remaining_input(input);
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(9u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnDataFrameStart(2, 5));
EXPECT_CALL(visitor_, OnDataFramePayload(absl::string_view("Data!")));
EXPECT_CALL(visitor_, OnDataFrameEnd());
processed_bytes = ProcessInput(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, CorruptFrame) {
InSequence s;
struct {
const char* const input;
const char* const error_message;
} kTestData[] = {{"\x0D"
"\x01"
"\x40",
"Unable to read MAX_PUSH_ID push_id."},
{"\x0D"
"\x04"
"\x05"
"foo",
"Superfluous data in MAX_PUSH_ID frame."},
{"\x07"
"\x01"
"\x40",
"Unable to read GOAWAY ID."},
{"\x07"
"\x04"
"\x05"
"foo",
"Superfluous data in GOAWAY frame."},
{"\x40\x89"
"\x01"
"\x40",
"Unable to read ACCEPT_CH origin."},
{"\x40\x89"
"\x01"
"\x05",
"Unable to read ACCEPT_CH origin."},
{"\x40\x89"
"\x04"
"\x05"
"foo",
"Unable to read ACCEPT_CH origin."},
{"\x40\x89"
"\x04"
"\x03"
"foo",
"Unable to read ACCEPT_CH value."},
{"\x40\x89"
"\x05"
"\x03"
"foo"
"\x40",
"Unable to read ACCEPT_CH value."},
{"\x40\x89"
"\x08"
"\x03"
"foo"
"\x05"
"bar",
"Unable to read ACCEPT_CH value."}};
for (const auto& test_data : kTestData) {
{
HttpDecoder decoder(&visitor_);
EXPECT_CALL(visitor_, OnAcceptChFrameStart(_)).Times(AnyNumber());
EXPECT_CALL(visitor_, OnError(&decoder));
absl::string_view input(test_data.input);
decoder.ProcessInput(input.data(), input.size());
EXPECT_THAT(decoder.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ(test_data.error_message, decoder.error_detail());
}
{
HttpDecoder decoder(&visitor_);
EXPECT_CALL(visitor_, OnAcceptChFrameStart(_)).Times(AnyNumber());
EXPECT_CALL(visitor_, OnError(&decoder));
absl::string_view input(test_data.input);
for (auto c : input) {
decoder.ProcessInput(&c, 1);
}
EXPECT_THAT(decoder.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ(test_data.error_message, decoder.error_detail());
}
}
}
TEST_F(HttpDecoderTest, EmptySettingsFrame) {
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("04"
"00",
&input));
EXPECT_CALL(visitor_, OnSettingsFrameStart(2));
SettingsFrame empty_frame;
EXPECT_CALL(visitor_, OnSettingsFrame(empty_frame));
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, EmptyGoAwayFrame) {
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("07"
"00",
&input));
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ("Unable to read GOAWAY ID.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, EmptyMaxPushIdFrame) {
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("0d"
"00",
&input));
EXPECT_CALL(visitor_, OnError(&decoder_));
EXPECT_EQ(input.size(), ProcessInput(input));
EXPECT_THAT(decoder_.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ("Unable to read MAX_PUSH_ID push_id.", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, LargeStreamIdInGoAway) {
GoAwayFrame frame;
frame.id = 1ull << 60;
std::string goaway = HttpEncoder::SerializeGoAwayFrame(frame);
EXPECT_CALL(visitor_, OnGoAwayFrame(frame));
EXPECT_GT(goaway.length(), 0u);
EXPECT_EQ(goaway.length(),
decoder_.ProcessInput(goaway.data(), goaway.length()));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, ObsoletePriorityUpdateFrame) {
const QuicByteCount header_length = 2;
const QuicByteCount payload_length = 3;
InSequence s;
std::string input;
ASSERT_TRUE(
absl::HexStringToBytes("0f"
"03"
"666f6f",
&input));
EXPECT_CALL(visitor_,
OnUnknownFrameStart(0x0f, header_length, payload_length));
EXPECT_CALL(visitor_, OnUnknownFramePayload(Eq("foo")));
EXPECT_CALL(visitor_, OnUnknownFrameEnd()).WillOnce(Return(false));
EXPECT_EQ(header_length + payload_length,
ProcessInputWithGarbageAppended(input));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_,
OnUnknownFrameStart(0x0f, header_length, payload_length));
EXPECT_CALL(visitor_, OnUnknownFramePayload(Eq("f")));
EXPECT_CALL(visitor_, OnUnknownFramePayload(Eq("o")));
EXPECT_CALL(visitor_, OnUnknownFramePayload(Eq("o")));
EXPECT_CALL(visitor_, OnUnknownFrameEnd());
ProcessInputCharByChar(input);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, PriorityUpdateFrame) {
InSequence s;
std::string input1;
ASSERT_TRUE(
absl::HexStringToBytes("800f0700"
"01"
"03",
&input1));
PriorityUpdateFrame priority_update1;
priority_update1.prioritized_element_id = 0x03;
EXPECT_CALL(visitor_, OnPriorityUpdateFrameStart(5)).WillOnce(Return(false));
absl::string_view remaining_input(input1);
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(5u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnPriorityUpdateFrame(priority_update1))
.WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnPriorityUpdateFrameStart(5));
EXPECT_CALL(visitor_, OnPriorityUpdateFrame(priority_update1));
EXPECT_EQ(input1.size(), ProcessInput(input1));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnPriorityUpdateFrameStart(5));
EXPECT_CALL(visitor_, OnPriorityUpdateFrame(priority_update1));
ProcessInputCharByChar(input1);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
std::string input2;
ASSERT_TRUE(
absl::HexStringToBytes("800f0700"
"04"
"05"
"666f6f",
&input2));
PriorityUpdateFrame priority_update2;
priority_update2.prioritized_element_id = 0x05;
priority_update2.priority_field_value = "foo";
EXPECT_CALL(visitor_, OnPriorityUpdateFrameStart(5)).WillOnce(Return(false));
remaining_input = input2;
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(5u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnPriorityUpdateFrame(priority_update2))
.WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnPriorityUpdateFrameStart(5));
EXPECT_CALL(visitor_, OnPriorityUpdateFrame(priority_update2));
EXPECT_EQ(input2.size(), ProcessInput(input2));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnPriorityUpdateFrameStart(5));
EXPECT_CALL(visitor_, OnPriorityUpdateFrame(priority_update2));
ProcessInputCharByChar(input2);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, CorruptPriorityUpdateFrame) {
std::string payload;
ASSERT_TRUE(absl::HexStringToBytes("4005",
&payload));
struct {
size_t payload_length;
const char* const error_message;
} kTestData[] = {
{0, "Unable to read prioritized element id."},
{1, "Unable to read prioritized element id."},
};
for (const auto& test_data : kTestData) {
std::string input;
ASSERT_TRUE(absl::HexStringToBytes("800f0700",
&input));
input.push_back(test_data.payload_length);
size_t header_length = input.size();
input.append(payload.data(), test_data.payload_length);
HttpDecoder decoder(&visitor_);
EXPECT_CALL(visitor_, OnPriorityUpdateFrameStart(header_length));
EXPECT_CALL(visitor_, OnError(&decoder));
QuicByteCount processed_bytes =
decoder.ProcessInput(input.data(), input.size());
EXPECT_EQ(input.size(), processed_bytes);
EXPECT_THAT(decoder.error(), IsError(QUIC_HTTP_FRAME_ERROR));
EXPECT_EQ(test_data.error_message, decoder.error_detail());
}
}
TEST_F(HttpDecoderTest, AcceptChFrame) {
InSequence s;
std::string input1;
ASSERT_TRUE(
absl::HexStringToBytes("4089"
"00",
&input1));
AcceptChFrame accept_ch1;
EXPECT_CALL(visitor_, OnAcceptChFrameStart(3)).WillOnce(Return(false));
absl::string_view remaining_input(input1);
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(3u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnAcceptChFrame(accept_ch1)).WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnAcceptChFrameStart(3));
EXPECT_CALL(visitor_, OnAcceptChFrame(accept_ch1));
EXPECT_EQ(input1.size(), ProcessInput(input1));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnAcceptChFrameStart(3));
EXPECT_CALL(visitor_, OnAcceptChFrame(accept_ch1));
ProcessInputCharByChar(input1);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
std::string input2;
ASSERT_TRUE(
absl::HexStringToBytes("4089"
"08"
"03"
"666f6f"
"03"
"626172",
&input2));
AcceptChFrame accept_ch2;
accept_ch2.entries.push_back({"foo", "bar"});
EXPECT_CALL(visitor_, OnAcceptChFrameStart(3)).WillOnce(Return(false));
remaining_input = input2;
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(3u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnAcceptChFrame(accept_ch2)).WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnAcceptChFrameStart(3));
EXPECT_CALL(visitor_, OnAcceptChFrame(accept_ch2));
EXPECT_EQ(input2.size(), ProcessInput(input2));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnAcceptChFrameStart(3));
EXPECT_CALL(visitor_, OnAcceptChFrame(accept_ch2));
ProcessInputCharByChar(input2);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, OriginFrame) {
if (!GetQuicReloadableFlag(enable_h3_origin_frame)) {
return;
}
InSequence s;
std::string input1;
ASSERT_TRUE(
absl::HexStringToBytes("0C"
"00",
&input1));
OriginFrame origin1;
EXPECT_CALL(visitor_, OnOriginFrameStart(2)).WillOnce(Return(false));
absl::string_view remaining_input(input1);
QuicByteCount processed_bytes =
ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(2u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnOriginFrame(origin1)).WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnOriginFrameStart(2));
EXPECT_CALL(visitor_, OnOriginFrame(origin1));
EXPECT_EQ(input1.size(), ProcessInput(input1));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnOriginFrameStart(2));
EXPECT_CALL(visitor_, OnOriginFrame(origin1));
ProcessInputCharByChar(input1);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
std::string input2;
ASSERT_TRUE(
absl::HexStringToBytes("0C"
"0A"
"0003"
"666f6f"
"0003"
"626172",
&input2));
ASSERT_EQ(12, input2.length());
OriginFrame origin2;
origin2.origins = {"foo", "bar"};
EXPECT_CALL(visitor_, OnOriginFrameStart(2)).WillOnce(Return(false));
remaining_input = input2;
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(2u, processed_bytes);
remaining_input = remaining_input.substr(processed_bytes);
EXPECT_CALL(visitor_, OnOriginFrame(origin2)).WillOnce(Return(false));
processed_bytes = ProcessInputWithGarbageAppended(remaining_input);
EXPECT_EQ(remaining_input.size(), processed_bytes);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnOriginFrameStart(2));
EXPECT_CALL(visitor_, OnOriginFrame(origin2));
EXPECT_EQ(input2.size(), ProcessInput(input2));
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
EXPECT_CALL(visitor_, OnOriginFrameStart(2));
EXPECT_CALL(visitor_, OnOriginFrame(origin2));
ProcessInputCharByChar(input2);
EXPECT_THAT(decoder_.error(), IsQuicNoError());
EXPECT_EQ("", decoder_.error_detail());
}
TEST_F(HttpDecoderTest, OriginFrameDisabled) {
if (GetQuicReloadableFlag(enable_h3_origin_frame)) {
return;
}
InSequence s;
std::string input1;
ASSERT_TRUE(
absl::HexStringToBytes("0C"
"00",
&input1));
EXPECT_CALL(visitor_, OnUnknownFrameStart(0x0C, 2, 0));
EXPECT_CALL(visitor_, OnUnknownFrameEnd());
EXPECT_EQ(ProcessInput(input1), input1.size());
std::string input2;
ASSERT_TRUE(
absl::HexStringToBytes("0C"
"0A"
"0003"
"666f6f"
"0003"
"626172",
&input2));
EXPECT_CALL(visitor_, OnUnknownFrameStart(0x0C, 2, input2.size() - 2));
EXPECT_CALL(visitor_, OnUnknownFramePayload(input2.substr(2)));
EXPECT_CALL(visitor_, OnUnknownFrameEnd());
EXPECT_EQ(ProcessInput(input2), input2.size());
}
TEST_F(HttpDecoderTest, WebTransportStreamDisabled) {
InSequence s;
std::string input;
ASSERT_TRUE(absl::HexStringToBytes("40414104", &input));
EXPECT_CALL(visitor_, OnUnknownFrameStart(0x41, input.size(), 0x104));
EXPECT_EQ(ProcessInput(input), input.size());
}
TEST(HttpDecoderTestNoFixture, WebTransportStream) {
testing::StrictMock<MockHttpDecoderVisitor> visitor;
HttpDecoder decoder(&visitor);
decoder.EnableWebTransportStreamParsing();
std::string input;
ASSERT_TRUE(absl::HexStringToBytes("40414104ffffffff", &input));
EXPECT_CALL(visitor, OnWebTransportStreamFrameType(4, 0x104));
QuicByteCount bytes = decoder.ProcessInput(input.data(), input.size());
EXPECT_EQ(bytes, 4u);
}
TEST(HttpDecoderTestNoFixture, WebTransportStreamError) {
testing::StrictMock<MockHttpDecoderVisitor> visitor;
HttpDecoder decoder(&visitor);
decoder.EnableWebTransportStreamParsing();
std::string input;
ASSERT_TRUE(absl::HexStringToBytes("404100", &input));
EXPECT_CALL(visitor, OnWebTransportStreamFrameType(_, _));
decoder.ProcessInput(input.data(), input.size());
EXPECT_QUIC_BUG(
{
EXPECT_CALL(visitor, OnError(_));
decoder.ProcessInput(input.data(), input.size());
},
"HttpDecoder called after an indefinite-length frame");
}
TEST_F(HttpDecoderTest, DecodeSettings) {
std::string input;
ASSERT_TRUE(absl::HexStringToBytes(
"04"
"07"
"01"
"02"
"06"
"05"
"4100"
"04",
&input));
SettingsFrame frame;
frame.values[1] = 2;
frame.values[6] = 5;
frame.values[256] = 4;
SettingsFrame out;
EXPECT_TRUE(HttpDecoder::DecodeSettings(input.data(), input.size(), &out));
EXPECT_EQ(frame, out);
ASSERT_TRUE(
absl::HexStringToBytes("0D"
"01"
"01",
&input));
EXPECT_FALSE(HttpDecoder::DecodeSettings(input.data(), input.size(), &out));
ASSERT_TRUE(absl::HexStringToBytes(
"04"
"01"
"42",
&input));
EXPECT_FALSE(HttpDecoder::DecodeSettings(input.data(), input.size(), &out));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/http_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/http_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
a1838971-fec5-40fa-ae1d-8854e5cd60e1 | cpp | google/tensorstore | constant_vector | tensorstore/util/constant_vector.cc | tensorstore/util/constant_vector_test.cc | #include "tensorstore/util/constant_vector.h"
#include <string>
#include "tensorstore/rank.h"
namespace tensorstore {
namespace internal_constant_vector {
const std::string kStringArray[kMaxRank] = {};
}
} | #include "tensorstore/util/constant_vector.h"
#include <string>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::GetConstantVector;
using ::tensorstore::span;
TEST(GetConstantVectorTest, RunTimeLengthInt) {
auto x = GetConstantVector<int, 3>(5);
static_assert(std::is_same_v<decltype(x), span<const int>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, ZeroRunTimeLengthInt) {
auto x = GetConstantVector<int, 3>(0);
static_assert(std::is_same_v<decltype(x), span<const int>>);
EXPECT_EQ(0, x.size());
}
TEST(GetConstantVectorTest, StaticLengthInt) {
constexpr auto x = GetConstantVector<int, 3, 5>();
static_assert(std::is_same_v<decltype(x), const span<const int, 5>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, StaticLengthIntUsingStaticRankValue) {
constexpr auto x = GetConstantVector<int, 3>(tensorstore::StaticRank<5>{});
static_assert(std::is_same_v<decltype(x), const span<const int, 5>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, StaticZeroLengthInt) {
constexpr auto x = GetConstantVector<int, 3, 0>();
static_assert(std::is_same_v<decltype(x), const span<const int, 0>>);
}
TEST(GetDefaultStringVectorTest, StaticLength) {
auto x = tensorstore::GetDefaultStringVector<2>();
static_assert(std::is_same_v<decltype(x), span<const std::string, 2>>);
EXPECT_THAT(x, ::testing::ElementsAre("", ""));
}
TEST(GetDefaultStringVectorTest, DynamicLength) {
auto x = tensorstore::GetDefaultStringVector(2);
static_assert(std::is_same_v<decltype(x), span<const std::string>>);
EXPECT_THAT(x, ::testing::ElementsAre("", ""));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_vector.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_vector_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f8ba3194-ffe1-415c-af89-f97e6987e7a8 | cpp | google/tensorstore | transform_array | tensorstore/index_space/internal/transform_array.cc | tensorstore/index_space/transform_array_test.cc | #include "tensorstore/index_space/internal/transform_array.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
namespace tensorstore {
namespace internal_index_space {
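// Transforms the sub-region of `array` specified by `result_origin` and
// `result_shape` under `transform`, writing the byte strides of the result to
// `result_byte_strides` and returning its base element pointer. Depending on
// `constraints`, the result may share memory with `array` or be newly
// allocated.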
Result<SharedElementPointer<const void>> TransformArraySubRegion(
const SharedArrayView<const void, dynamic_rank, offset_origin>& array,
TransformRep* transform, const Index* result_origin,
const Index* result_shape, Index* result_byte_strides,
TransformArrayConstraints constraints) {
const DimensionIndex input_rank =
transform ? transform->input_rank : array.rank();
for (DimensionIndex i = 0; i < input_rank; ++i) {
if (result_shape[i] == 0) {
std::fill_n(result_byte_strides, input_rank, 0);
return SharedElementPointer<const void>(std::shared_ptr<const void>(),
array.dtype());
}
}
namespace flags = input_dimension_iteration_flags;
flags::Bitmask input_dimension_flags[kMaxRank];
std::fill_n(
&input_dimension_flags[0], input_rank,
flags::GetDefaultBitmask(constraints.repeated_elements_constraint()));
SingleArrayIterationState single_array_states[2];
TENSORSTORE_RETURN_IF_ERROR(
internal_index_space::InitializeSingleArrayIterationState(
array,
transform,
result_origin,
result_shape, &single_array_states[0],
&input_dimension_flags[0]));
if (single_array_states[0].num_array_indexed_output_dimensions == 0) {
if (constraints.allocate_constraint() != must_allocate) {
std::copy_n(&single_array_states[0].input_byte_strides[0], input_rank,
result_byte_strides);
return SharedElementPointer<void>(
std::shared_ptr<void>(array.pointer(),
single_array_states[0].base_pointer),
array.element_pointer().dtype());
}
const StridedLayoutView<> source_layout(
input_rank, result_shape,
&single_array_states[0].input_byte_strides[0]);
const StridedLayoutView<> new_layout(input_rank, result_shape,
result_byte_strides);
auto element_pointer = internal::AllocateArrayLike(
array.element_pointer().dtype(), source_layout, result_byte_strides,
constraints.iteration_constraints(), default_init);
CopyArray(ArrayView<const void>(
ElementPointer<void>(single_array_states[0].base_pointer,
array.element_pointer().dtype()),
source_layout),
ArrayView<void>(element_pointer, new_layout));
return element_pointer;
}
MarkSingletonDimsAsSkippable(span(result_shape, input_rank),
&input_dimension_flags[0]);
SharedElementPointer<void> new_element_pointer;
if (constraints.order_constraint()) {
Index new_shape[kMaxRank];
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
new_shape[input_dim] = input_dimension_flags[input_dim] == flags::can_skip
? 1
: result_shape[input_dim];
}
ComputeStrides(constraints.order_constraint().order(), array.dtype()->size,
span<const Index>(&new_shape[0], input_rank),
span(result_byte_strides, input_rank));
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
if (new_shape[input_dim] <= 1) result_byte_strides[input_dim] = 0;
}
const Index new_origin_offset =
IndexInnerProduct(input_rank, result_byte_strides, result_origin);
new_element_pointer = internal::AllocateAndConstructSharedElements(
ProductOfExtents(span<const Index>(new_shape, input_rank)),
default_init, array.dtype());
const absl::Status init_status =
internal_index_space::InitializeSingleArrayIterationState(
ArrayView<void, dynamic_rank, offset_origin>(
AddByteOffset(ElementPointer<void>(new_element_pointer),
-new_origin_offset),
StridedLayoutView<dynamic_rank, offset_origin>(
input_rank, result_origin, &new_shape[0],
result_byte_strides)),
nullptr,
result_origin,
result_shape, &single_array_states[1],
&input_dimension_flags[0]);
assert(init_status.ok());
}
DimensionIterationOrder base_layout =
constraints.order_constraint()
? ComputeDimensionIterationOrder<2>(
single_array_states,
span(input_dimension_flags).first(input_rank),
{})
: ComputeDimensionIterationOrder<1>(
{&single_array_states[0], 1},
span(input_dimension_flags).first(input_rank),
{});
if (!constraints.order_constraint()) {
Index new_shape[kMaxRank];
Index new_byte_strides[kMaxRank];
for (DimensionIndex i = 0; i < base_layout.pure_strided_end_dim; ++i) {
const DimensionIndex input_dim = base_layout.input_dimension_order[i];
new_shape[i] = result_shape[input_dim];
}
std::fill_n(result_byte_strides, input_rank, 0);
ComputeStrides(
ContiguousLayoutOrder::c, array.dtype()->size,
span<const Index>(&new_shape[0], base_layout.pure_strided_end_dim),
span<Index>(&new_byte_strides[0], base_layout.pure_strided_end_dim));
for (DimensionIndex i = 0; i < base_layout.pure_strided_end_dim; ++i) {
const DimensionIndex input_dim = base_layout.input_dimension_order[i];
result_byte_strides[input_dim] = new_byte_strides[i];
}
new_element_pointer = internal::AllocateAndConstructSharedElements(
ProductOfExtents(
span<const Index>(&new_shape[0], base_layout.pure_strided_end_dim)),
default_init, array.dtype());
const Index new_origin_offset =
IndexInnerProduct(input_rank, result_byte_strides, result_origin);
const absl::Status init_status =
internal_index_space::InitializeSingleArrayIterationState(
ArrayView<void, dynamic_rank, offset_origin>(
AddByteOffset(ElementPointer<void>(new_element_pointer),
-new_origin_offset),
StridedLayoutView<dynamic_rank, offset_origin>(
input_rank, result_origin, &new_shape[0],
result_byte_strides)),
nullptr,
result_origin,
result_shape, &single_array_states[1],
&input_dimension_flags[0]);
assert(init_status.ok());
}
SimplifiedDimensionIterationOrder layout = SimplifyDimensionIterationOrder<2>(
base_layout, span(result_shape, input_rank), single_array_states);
const std::array<std::ptrdiff_t, 2> element_sizes{array.dtype()->size,
array.dtype()->size};
[[maybe_unused]] const bool success = IterateUsingSimplifiedLayout<2>(
layout, span(result_shape, input_rank),
{&array.dtype()->copy_assign, nullptr},
nullptr, single_array_states, element_sizes);
assert(success);
return new_element_pointer;
}
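// Like TransformArraySubRegion, but first propagates the bounds of `array`
// through `transform` to obtain the result domain, and offsets the returned
// pointer so that elements are addressed using the propagated origin.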
Result<SharedElementPointer<const void>> TransformArrayPreservingOrigin(
SharedArrayView<const void, dynamic_rank, offset_origin> array,
TransformRep* transform, Index* result_origin, Index* result_shape,
Index* result_byte_strides, TransformArrayConstraints constraints) {
const DimensionIndex input_rank =
transform ? transform->input_rank : array.rank();
TENSORSTORE_RETURN_IF_ERROR(PropagateExplicitBounds(
array.domain(),
transform,
MutableBoxView<>(input_rank, result_origin, result_shape)));
TENSORSTORE_ASSIGN_OR_RETURN(
auto element_pointer,
TransformArraySubRegion(array, transform, result_origin, result_shape,
result_byte_strides, constraints));
return AddByteOffset(std::move(element_pointer),
-IndexInnerProduct(transform->input_rank,
result_byte_strides, result_origin));
}
Result<SharedElementPointer<const void>> TransformArrayDiscardingOrigin(
SharedArrayView<const void, dynamic_rank, offset_origin> array,
TransformRep* transform, Index* result_shape, Index* result_byte_strides,
TransformArrayConstraints constraints) {
const DimensionIndex input_rank =
transform ? transform->input_rank : array.rank();
Index result_origin[kMaxRank];
TENSORSTORE_RETURN_IF_ERROR(PropagateExplicitBounds(
array.domain(),
transform,
MutableBoxView<>(input_rank, &result_origin[0], result_shape)));
return TransformArraySubRegion(array, transform, &result_origin[0],
result_shape, result_byte_strides,
constraints);
}
}
} | #include "tensorstore/index_space/internal/transform_array.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesStatus;
TEST(TransformArrayTest, OneDimensionalIdentity) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array, IdentityTransform<1>())
.value();
EXPECT_EQ(original_array, new_array);
}
TEST(TransformArrayTest, OneDimensionalIdentityWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array, IdentityTransform<1>())
.value();
EXPECT_EQ(original_array, new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceUnstrided) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array = tensorstore::TransformArray(
original_array, IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(1), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceUnstridedWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array,
IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, 5, 1, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(6), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceStrided) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array, IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, -1, 2, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(1), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 4}), new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceStridedWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array,
IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, 4, 2, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(6), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 4}), new_array);
}
TEST(TransformArrayTest, OneDArrayOneDIndexArray) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, 1, 1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({2}, {2, 4, 4, 3}), new_array);
}
TEST(TransformArrayTest, OneDArrayOneDIndexArray1025) {
constexpr Index kSize = 1025;
auto index_array = tensorstore::AllocateArray<Index>({kSize});
for (Index i = 0; i < kSize; ++i) index_array(i) = i;
auto new_array =
tensorstore::TransformArray(index_array,
IndexTransformBuilder<1, 1>()
.input_shape({kSize})
.output_index_array(0, 0, 1, index_array)
.Finalize()
.value())
.value();
EXPECT_EQ(index_array, new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayRetainZeroStride) {
auto index_array = tensorstore::MakeArray<Index>({0, 1, 2, 3, 4});
tensorstore::SharedArray<Index, 2> index_array2;
index_array2.element_pointer() = index_array.element_pointer();
index_array2.shape()[0] = 5;
index_array2.shape()[1] = 2;
index_array2.byte_strides()[0] = index_array.byte_strides()[0];
index_array2.byte_strides()[1] = 0;
EXPECT_EQ(index_array2,
MakeArray<Index>({{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}}));
auto new_array =
tensorstore::TransformArray(index_array2,
IndexTransformBuilder<2, 2>()
.input_shape({5, 2})
.output_index_array(0, 0, 1, index_array2)
.output_single_input_dimension(1, 1)
.Finalize()
.value())
.value();
EXPECT_EQ(index_array2, new_array);
EXPECT_EQ(index_array2.layout(), new_array.layout());
}
TEST(TransformArrayTest, IndexArrayBoundsOverflow) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
EXPECT_THAT(tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, std::numeric_limits<Index>::min(),
1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Integer overflow propagating range.*"));
}
TEST(TransformArrayTest, OneDArrayOneDIndexArrayWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, 6, 1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({2}, {2, 4, 4, 3}), new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArray) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, -1, 1, 0)
.output_index_array(1, 1, 1, MakeArray<Index>({{0, 2, 2, 1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{2, 4, 4, 3}, {6, 8, 8, 7}}),
new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayWithOrigin) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 4, 1, 0)
.output_index_array(1, 7, 1, MakeArray<Index>({{0, 2, 2, 1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{2, 4, 4, 3}, {6, 8, 8, 7}}),
new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayStrided) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
}
TEST(TransformArrayTest, ArrayIndexOutOfBounds) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
EXPECT_THAT(
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, 2, 1, 0}}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 2 is outside valid range \\[0, 2\\).*"));
EXPECT_THAT(
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, -1, 1, 0}}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index -1 is outside valid range \\[0, 2\\).*"));
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayStridedWithOrigin) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2));
}
TEST(TransformArrayTest, IncludeRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::include_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(
new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 2, sizeof(int) * 4, sizeof(int)));
}
TEST(TransformArrayTest, SkipSingleton) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 1})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::skip_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3},
{{{6}, {8}, {8}, {6}}, {{2}, {4}, {4}, {2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2, 0));
}
TEST(TransformArrayTest, SkipRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::skip_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2, 0));
}
TEST(TransformArrayTest, OrderConstraint) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value(),
tensorstore::c_order)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 4, sizeof(int)));
}
TEST(TransformArrayTest, OrderConstraintIncludeRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
{tensorstore::c_order, tensorstore::include_repeated_elements})
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(
new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 8, sizeof(int) * 2, sizeof(int)));
}
TEST(TransformArrayTest, OrderConstraintSkipRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
{tensorstore::c_order, tensorstore::skip_repeated_elements})
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 4, sizeof(int), 0));
}
TEST(TransformArrayTest, MultipleArrayIndexedDimensions) {
auto original_array = tensorstore::MakeArray<int>({{1, 2}, {5, 6}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{0, 1}}))
.output_index_array(1, 0, 1, MakeArray<Index>({{0}, {1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeArray<int>({{1, 5}, {2, 6}}), new_array);
}
TEST(TransformArrayTest, EmptyDomain) {
auto original_array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
(IndexTransformBuilder<2, 2>()
.input_shape({0, 3})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_index_array(0, 0, 1, MakeArray<Index>({{0, 1, 2}}))
.Finalize()));
EXPECT_THAT(tensorstore::TransformArray(original_array, transform),
::testing::Optional(tensorstore::AllocateArray<int>({0, 3})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/transform_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2b46fc07-d111-4925-b26a-7aae4df750d2 | cpp | google/tensorstore | output_index_map | tensorstore/index_space/output_index_map.h | tensorstore/index_space/output_index_map_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_OUTPUT_INDEX_MAP_H_
#define TENSORSTORE_INDEX_SPACE_OUTPUT_INDEX_MAP_H_
#include <cassert>
#include "tensorstore/array.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
namespace tensorstore {
template <DimensionIndex InputRank = dynamic_rank>
class OutputIndexMapRef {
public:
class IndexArrayView {
public:
SharedArrayView<const Index, InputRank, offset_origin> shared_array_ref()
const {
return {element_pointer(), layout()};
}
ArrayView<const Index, InputRank, offset_origin> array_ref() const {
return {element_pointer(), layout()};
}
const SharedElementPointer<const Index>& element_pointer() const {
return index_array_data_->element_pointer;
}
IndexInterval index_range() const { return index_array_data_->index_range; }
StaticOrDynamicRank<InputRank> rank() const {
return StaticRankCast<InputRank, unchecked>(
static_cast<DimensionIndex>(rep_->input_rank));
}
StridedLayoutView<InputRank, offset_origin> layout() const {
return StridedLayoutView<InputRank, offset_origin>(
rank(), rep_->input_origin().data(), rep_->input_shape().data(),
index_array_data_->byte_strides);
}
span<const Index, InputRank> byte_strides() const {
return {index_array_data_->byte_strides, rank()};
}
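    // Note: a byte stride of 0 for some input dimension means the index
    // array is broadcast along that dimension (every position reuses the
    // same index value), so callers must not assume the strides are nonzero.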
private:
template <DimensionIndex>
friend class OutputIndexMapRef;
explicit IndexArrayView(
internal_index_space::IndexArrayData* index_array_data,
internal_index_space::TransformRep* rep)
: index_array_data_(index_array_data), rep_(rep) {}
internal_index_space::IndexArrayData* index_array_data_;
internal_index_space::TransformRep* rep_;
};
OutputIndexMapRef() = default;
OutputIndexMapRef& operator=(const OutputIndexMapRef&) = default;
StaticOrDynamicRank<InputRank> input_rank() const {
return StaticRankCast<InputRank, unchecked>(
static_cast<DimensionIndex>(rep_->input_rank));
}
OutputIndexMethod method() const { return map_->method(); }
Index offset() const { return map_->offset(); }
Index stride() const { return map_->stride(); }
DimensionIndex input_dimension() const { return map_->input_dimension(); }
IndexArrayView index_array() const {
return IndexArrayView(&map_->index_array_data(), rep_);
}
private:
template <DimensionIndex, DimensionIndex, ContainerKind>
friend class OutputIndexMapRange;
template <DimensionIndex>
friend class OutputIndexMapIterator;
explicit OutputIndexMapRef(internal_index_space::OutputIndexMap* map,
internal_index_space::TransformRep* rep)
: map_(map), rep_(rep) {}
internal_index_space::OutputIndexMap* map_ = nullptr;
internal_index_space::TransformRep* rep_ = nullptr;
};
template <DimensionIndex InputRank = dynamic_rank>
class OutputIndexMapIterator {
public:
using value_type = OutputIndexMapRef<InputRank>;
using reference = OutputIndexMapRef<InputRank>;
using difference_type = DimensionIndex;
using pointer = value_type*;
using iterator_category = std::random_access_iterator_tag;
OutputIndexMapIterator() = default;
OutputIndexMapRef<InputRank> operator*() const { return ref_; }
const OutputIndexMapRef<InputRank>* operator->() const { return &ref_; }
OutputIndexMapRef<InputRank> operator[](DimensionIndex n) const {
auto new_ref = ref_;
new_ref.map_ += n;
return new_ref;
}
OutputIndexMapIterator& operator+=(DimensionIndex n) {
ref_.map_ += n;
return *this;
}
OutputIndexMapIterator& operator-=(DimensionIndex n) { return *this += (-n); }
OutputIndexMapIterator& operator++() {
++ref_.map_;
return *this;
}
OutputIndexMapIterator& operator--() {
--ref_.map_;
return *this;
}
OutputIndexMapIterator operator++(int) {
auto temp = *this;
++ref_.map_;
return temp;
}
OutputIndexMapIterator operator--(int) {
auto temp = *this;
--ref_.map_;
return temp;
}
friend DimensionIndex operator-(OutputIndexMapIterator a,
OutputIndexMapIterator b) {
return a.map() - b.map();
}
friend OutputIndexMapIterator operator+(OutputIndexMapIterator it,
DimensionIndex n) {
it += n;
return it;
}
friend OutputIndexMapIterator operator+(DimensionIndex n,
OutputIndexMapIterator it) {
it += n;
return it;
}
friend OutputIndexMapIterator operator-(OutputIndexMapIterator it,
DimensionIndex n) {
it -= n;
return it;
}
friend bool operator==(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() == b.map();
}
friend bool operator!=(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() != b.map();
}
friend bool operator<(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() < b.map();
}
friend bool operator<=(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() <= b.map();
}
friend bool operator>(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() > b.map();
}
friend bool operator>=(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() >= b.map();
}
private:
internal_index_space::OutputIndexMap* map() const { return ref_.map_; }
template <DimensionIndex, DimensionIndex, ContainerKind>
friend class OutputIndexMapRange;
OutputIndexMapRef<InputRank> ref_;
explicit OutputIndexMapIterator(internal_index_space::OutputIndexMap* map,
internal_index_space::TransformRep* rep)
: ref_(map, rep) {}
};
template <DimensionIndex InputRank = dynamic_rank,
DimensionIndex OutputRank = dynamic_rank, ContainerKind CKind = view>
class OutputIndexMapRange {
public:
using value_type = OutputIndexMapRef<InputRank>;
using reference = value_type;
using iterator = OutputIndexMapIterator<InputRank>;
using difference_type = DimensionIndex;
constexpr static DimensionIndex extent = OutputRank;
OutputIndexMapRange() = default;
explicit OutputIndexMapRange(
IndexTransform<InputRank, OutputRank, CKind> transform)
: transform_(std::move(transform)) {}
template <DimensionIndex OtherInputRank, DimensionIndex OtherOutputRank,
ContainerKind OtherCKind,
typename = std::enable_if_t<
(RankConstraint::Implies(OtherInputRank, InputRank) &&
RankConstraint::Implies(OtherOutputRank, OutputRank))>>
OutputIndexMapRange(
OutputIndexMapRange<OtherInputRank, OtherOutputRank, OtherCKind> other)
: transform_(std::move(other.transform_)) {}
StaticOrDynamicRank<OutputRank> size() const {
return transform_.output_rank();
}
bool empty() const { return size() == 0; }
iterator begin() const {
return iterator(rep()->output_index_maps().data(), rep());
}
iterator end() const {
return iterator(rep()->output_index_maps().data() + size(), rep());
}
OutputIndexMapRef<InputRank> operator[](DimensionIndex output_dim) const {
assert(output_dim >= 0 && output_dim < size());
return OutputIndexMapRef<InputRank>(
rep()->output_index_maps().data() + output_dim, rep());
}
StaticOrDynamicRank<InputRank> input_rank() const {
return transform_.input_rank();
}
private:
template <DimensionIndex, DimensionIndex, ContainerKind>
friend class OutputIndexMapRange;
internal_index_space::TransformRep* rep() const {
return internal_index_space::TransformAccess::rep(transform_);
}
IndexTransform<InputRank, OutputRank, CKind> transform_;
};
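// Illustrative usage (a sketch, not part of the original header;
// `transform` is a hypothetical IndexTransform):
//
//   for (auto map : transform.output_index_maps()) {
//     switch (map.method()) {
//       case OutputIndexMethod::constant:
//         // output index == map.offset()
//         break;
//       case OutputIndexMethod::single_input_dimension:
//         // output index == map.offset() +
//         // map.stride() * input[map.input_dimension()]
//         break;
//       case OutputIndexMethod::array:
//         // output index == map.offset() +
//         // map.stride() * map.index_array()(input...)
//         break;
//     }
//   }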
}
#endif | #include "tensorstore/index_space/output_index_map.h"
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::offset_origin;
using ::tensorstore::OutputIndexMapIterator;
using ::tensorstore::OutputIndexMapRange;
using ::tensorstore::OutputIndexMapRef;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::span;
using ::tensorstore::StaticRank;
using ::tensorstore::StridedLayout;
TEST(OutputIndexMethodTest, Ostream) {
EXPECT_EQ("constant", tensorstore::StrCat(OutputIndexMethod::constant));
EXPECT_EQ("single_input_dimension",
tensorstore::StrCat(OutputIndexMethod::single_input_dimension));
EXPECT_EQ("array", tensorstore::StrCat(OutputIndexMethod::array));
EXPECT_EQ("<unknown>",
tensorstore::StrCat(static_cast<OutputIndexMethod>(-1)));
}
TEST(OutputIndexMapTest, StaticRanks) {
auto index_array = MakeOffsetArray<Index>({1, 2, 3}, {{{5}, {6}, {7}, {8}}});
auto t = IndexTransformBuilder<3, 4>()
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3, index_array,
IndexInterval::Closed(3, 10))
.Finalize()
.value();
auto range = t.output_index_maps();
static_assert(std::is_same_v<decltype(range), OutputIndexMapRange<3, 4>>);
static_assert(std::is_same_v<StaticRank<4>, decltype(range.size())>);
static_assert(std::is_same_v<StaticRank<3>, decltype(range.input_rank())>);
EXPECT_EQ(4, range.size());
EXPECT_EQ(3, range.input_rank());
EXPECT_EQ(false, range.empty());
auto it = range.begin();
static_assert(std::is_same_v<OutputIndexMapIterator<3>, decltype(it)>);
EXPECT_EQ(range.begin(), it);
EXPECT_NE(range.end(), it);
EXPECT_EQ(range.end(), range.end());
{
auto output0 = *it;
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output0)>);
EXPECT_EQ(OutputIndexMethod::constant, output0.method());
EXPECT_EQ(10, output0.offset());
}
{
auto it0 = it;
EXPECT_EQ(&++it0, &it0);
EXPECT_EQ(20, it0->offset());
EXPECT_EQ(&--it0, &it0);
EXPECT_EQ(10, it0->offset());
}
{
auto it0 = it + 1;
EXPECT_EQ(20, it0->offset());
it0 = 2 + it;
EXPECT_EQ(30, it0->offset());
it0 = it0 - 2;
EXPECT_EQ(10, it0->offset());
}
{
auto it0 = it + 1;
EXPECT_EQ(1, it0 - it);
EXPECT_EQ(-1, it - it0);
EXPECT_TRUE(it < it0);
EXPECT_TRUE(it <= it0);
EXPECT_TRUE(it != it0);
EXPECT_FALSE(it == it0);
EXPECT_FALSE(it >= it0);
EXPECT_FALSE(it > it0);
EXPECT_FALSE(it0 < it);
EXPECT_FALSE(it0 <= it);
EXPECT_TRUE(it0 != it);
EXPECT_FALSE(it0 == it);
EXPECT_TRUE(it0 >= it);
EXPECT_TRUE(it0 > it);
EXPECT_FALSE(it < it);
EXPECT_TRUE(it <= it);
EXPECT_FALSE(it != it);
EXPECT_TRUE(it == it);
EXPECT_TRUE(it >= it);
EXPECT_FALSE(it > it);
}
{
auto it0 = it;
auto it1 = it0++;
EXPECT_EQ(it1, it);
EXPECT_EQ(it0, it + 1);
EXPECT_EQ(10, it1->offset());
EXPECT_EQ(20, it0->offset());
auto it2 = it0--;
EXPECT_EQ(it2, it + 1);
EXPECT_EQ(it0, it);
}
++it;
{
auto output1 = *it;
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1.method());
EXPECT_EQ(2, output1.input_dimension());
EXPECT_EQ(20, output1.offset());
EXPECT_EQ(2, output1.stride());
}
{
auto output1a = range.begin()[1];
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output1a)>);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1a.method());
EXPECT_EQ(2, output1a.input_dimension());
EXPECT_EQ(20, output1a.offset());
EXPECT_EQ(2, output1a.stride());
}
{
auto output1b = range[1];
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output1b)>);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1b.method());
EXPECT_EQ(2, output1b.input_dimension());
EXPECT_EQ(20, output1b.offset());
EXPECT_EQ(2, output1b.stride());
}
{
auto output1c = t.output_index_map(1);
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output1c)>);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1c.method());
EXPECT_EQ(2, output1c.input_dimension());
EXPECT_EQ(20, output1c.offset());
EXPECT_EQ(2, output1c.stride());
}
++it;
{
auto output2 = *it;
EXPECT_EQ(OutputIndexMethod::array, output2.method());
EXPECT_EQ(30, output2.offset());
EXPECT_EQ(3, output2.stride());
auto index_array_ref = output2.index_array();
EXPECT_EQ(&index_array(1, 2, 3), &index_array_ref.array_ref()(1, 2, 3));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 10),
index_array_ref.index_range());
static_assert(
std::is_same_v<StaticRank<3>, decltype(index_array_ref.rank())>);
const StridedLayout<3, offset_origin> expected_layout(
{1, 2, 3}, {4, 4, 3}, {0, sizeof(Index), 0});
EXPECT_EQ(expected_layout, index_array_ref.layout());
EXPECT_EQ(&index_array(1, 2, 3),
&index_array_ref.shared_array_ref()(1, 2, 3));
EXPECT_EQ(expected_layout, index_array_ref.shared_array_ref().layout());
EXPECT_EQ(expected_layout, index_array_ref.array_ref().layout());
EXPECT_THAT(index_array_ref.byte_strides(),
testing::ElementsAreArray(expected_layout.byte_strides()));
EXPECT_EQ(0, index_array_ref.byte_strides()[0]);
EXPECT_EQ(sizeof(Index), index_array_ref.byte_strides()[1]);
EXPECT_EQ(0, index_array_ref.byte_strides()[2]);
}
++it;
{
auto output3 = *it;
EXPECT_EQ(OutputIndexMethod::constant, output3.method());
EXPECT_EQ(0, output3.offset());
}
++it;
EXPECT_EQ(range.end(), it);
}
TEST(OutputIndexMapTest, ZeroRank) {
auto t = IndexTransformBuilder<3, 0>()
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.Finalize()
.value();
auto range = t.output_index_maps();
EXPECT_EQ(0, range.size());
EXPECT_EQ(3, range.input_rank());
EXPECT_TRUE(range.empty());
}
TEST(OutputIndexMapTest, DynamicRanks) {
auto index_array = MakeOffsetArray<Index>({1, 2, 3}, {{{5}, {6}, {7}, {8}}});
auto t = IndexTransformBuilder<>(3, 4)
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3, index_array,
IndexInterval::Closed(3, 10))
.Finalize()
.value();
auto range = t.output_index_maps();
static_assert(std::is_same_v<decltype(range), OutputIndexMapRange<>>);
EXPECT_EQ(4, range.size());
EXPECT_EQ(3, range.input_rank());
EXPECT_EQ(false, range.empty());
auto it = range.begin();
static_assert(std::is_same_v<OutputIndexMapIterator<>, decltype(it)>);
{
auto output0 = *it;
static_assert(std::is_same_v<OutputIndexMapRef<>, decltype(output0)>);
EXPECT_EQ(OutputIndexMethod::constant, output0.method());
EXPECT_EQ(10, output0.offset());
}
{
auto output2 = range[2];
static_assert(std::is_same_v<OutputIndexMapRef<>, decltype(output2)>);
EXPECT_EQ(OutputIndexMethod::array, output2.method());
EXPECT_EQ(30, output2.offset());
EXPECT_EQ(3, output2.stride());
auto index_array_ref = output2.index_array();
EXPECT_EQ(&index_array(1, 2, 3), &index_array_ref.array_ref()(1, 2, 3));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 10),
index_array_ref.index_range());
EXPECT_EQ(3, index_array.rank());
const StridedLayout<dynamic_rank, offset_origin> expected_layout(
{1, 2, 3}, {4, 4, 3}, {0, sizeof(Index), 0});
EXPECT_EQ(expected_layout, index_array_ref.layout());
EXPECT_EQ(&index_array(1, 2, 3),
&index_array_ref.shared_array_ref()(1, 2, 3));
EXPECT_EQ(expected_layout, index_array_ref.shared_array_ref().layout());
}
}
TEST(OutputIndexMapTest, Unbroadcast) {
auto index_array = tensorstore::MakeArray<Index>({{{5}, {6}, {7}, {8}}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t, IndexTransformBuilder(3, 4)
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3, index_array)
.Finalize());
auto map = t.output_index_maps()[2];
EXPECT_THAT(map.index_array().array_ref(),
MakeOffsetArray<Index>(
{1, 2, 3}, {
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
}));
EXPECT_THAT(UnbroadcastArrayPreserveRank(map.index_array().array_ref()),
index_array);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/output_index_map.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/output_index_map_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
55dc148d-bb3c-4c44-8572-b7c721de9c6b | cpp | tensorflow/tensorflow | update_api_def | tensorflow/core/api_def/update_api_def.cc | tensorflow/core/api_def/update_api_def_test.cc | #include "tensorflow/core/api_def/update_api_def.h"
#include <ctype.h>
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/api_def/excluded_ops.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace {
constexpr char kApiDefFileFormat[] = "api_def_%s.pbtxt";
constexpr char kDocStart[] = ".Doc(R\"doc(";
constexpr char kDocEnd[] = ")doc\")";
void FillBaseApiDef(ApiDef* api_def, const OpDef& op) {
api_def->set_graph_op_name(op.name());
for (auto& input_arg : op.input_arg()) {
if (!input_arg.description().empty()) {
auto* api_def_in_arg = api_def->add_in_arg();
api_def_in_arg->set_name(input_arg.name());
api_def_in_arg->set_description(input_arg.description());
}
}
for (auto& output_arg : op.output_arg()) {
if (!output_arg.description().empty()) {
auto* api_def_out_arg = api_def->add_out_arg();
api_def_out_arg->set_name(output_arg.name());
api_def_out_arg->set_description(output_arg.description());
}
}
for (auto& attr : op.attr()) {
if (!attr.description().empty()) {
auto* api_def_attr = api_def->add_attr();
api_def_attr->set_name(attr.name());
api_def_attr->set_description(attr.description());
}
}
api_def->set_summary(op.summary());
api_def->set_description(op.description());
}
bool OpHasDocs(const OpDef& op) {
if (!op.summary().empty() || !op.description().empty()) {
return true;
}
for (const auto& arg : op.input_arg()) {
if (!arg.description().empty()) {
return true;
}
}
for (const auto& arg : op.output_arg()) {
if (!arg.description().empty()) {
return true;
}
}
for (const auto& attr : op.attr()) {
if (!attr.description().empty()) {
return true;
}
}
return false;
}
bool CheckDocsMatch(const OpDef& op1, const OpDef& op2) {
if (op1.summary() != op2.summary() ||
op1.description() != op2.description() ||
op1.input_arg_size() != op2.input_arg_size() ||
op1.output_arg_size() != op2.output_arg_size() ||
op1.attr_size() != op2.attr_size()) {
return false;
}
for (int i = 0; i < op1.input_arg_size(); ++i) {
if (op1.input_arg(i).description() != op2.input_arg(i).description()) {
return false;
}
}
for (int i = 0; i < op1.output_arg_size(); ++i) {
if (op1.output_arg(i).description() != op2.output_arg(i).description()) {
return false;
}
}
for (int i = 0; i < op1.attr_size(); ++i) {
if (op1.attr(i).description() != op2.attr(i).description()) {
return false;
}
}
return true;
}
bool ValidateOpDocs(const OpDef& op, const string& doc) {
OpDefBuilder b(op.name());
for (const auto& arg : op.input_arg()) {
b.Input(arg.name() + ":string");
}
for (const auto& arg : op.output_arg()) {
b.Output(arg.name() + ":string");
}
for (const auto& attr : op.attr()) {
b.Attr(attr.name() + ":string");
}
b.Doc(doc);
OpRegistrationData op_reg_data;
TF_CHECK_OK(b.Finalize(&op_reg_data));
return CheckDocsMatch(op, op_reg_data.op_def);
}
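// ValidateOpDocs works by re-declaring the op with every input, output, and
// attr given a dummy ":string" type, attaching the candidate doc text, and
// letting OpDefBuilder re-parse it; the parsed descriptions are then compared
// field-by-field against the real op.  Only the doc text matters here, so the
// dummy types are harmless.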
}
string RemoveDoc(const OpDef& op, const string& file_contents,
size_t start_location) {
const auto doc_start_location = file_contents.find(kDocStart, start_location);
const string format_error = strings::Printf(
"Could not find %s doc for removal. Make sure the doc is defined with "
"'%s' prefix and '%s' suffix or remove the doc manually.",
op.name().c_str(), kDocStart, kDocEnd);
if (doc_start_location == string::npos) {
std::cerr << format_error << std::endl;
LOG(ERROR) << "Didn't find doc start";
return file_contents;
}
const auto doc_end_location = file_contents.find(kDocEnd, doc_start_location);
if (doc_end_location == string::npos) {
LOG(ERROR) << "Didn't find doc start";
std::cerr << format_error << std::endl;
return file_contents;
}
const auto doc_start_size = sizeof(kDocStart) - 1;
string doc_text = file_contents.substr(
doc_start_location + doc_start_size,
doc_end_location - doc_start_location - doc_start_size);
if (!ValidateOpDocs(op, doc_text)) {
LOG(ERROR) << "Invalid doc: " << doc_text;
std::cerr << format_error << std::endl;
return file_contents;
}
auto before_doc = file_contents.substr(0, doc_start_location);
absl::StripTrailingAsciiWhitespace(&before_doc);
return before_doc +
file_contents.substr(doc_end_location + sizeof(kDocEnd) - 1);
}
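// Illustrative example of the transformation (a sketch; "MyOp" is a
// hypothetical op name).  Given file contents
//
//   REGISTER_OP("MyOp")
//       .Input("a: T")
//       .Doc(R"doc(
//   Summary for MyOp.
//   )doc");
//
// RemoveDoc returns the same text with the `.Doc(R"doc(...)doc")` call
// stripped (and trailing whitespace before it trimmed), provided the doc
// text round-trips through ValidateOpDocs; otherwise the original contents
// are returned unchanged and an error is reported.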
namespace {
void RemoveDocs(const std::vector<const OpDef*>& ops,
const std::vector<string>& op_files) {
std::set<string> processed_ops;
for (const auto& file : op_files) {
string file_contents;
bool file_contents_updated = false;
TF_CHECK_OK(ReadFileToString(Env::Default(), file, &file_contents));
for (auto op : ops) {
if (processed_ops.find(op->name()) != processed_ops.end()) {
continue;
}
string register_call =
strings::Printf("REGISTER_OP(\"%s\")", op->name().c_str());
const auto register_call_location = file_contents.find(register_call);
if (register_call_location == string::npos) {
continue;
}
std::cout << "Removing .Doc call for " << op->name() << " from " << file
<< "." << std::endl;
file_contents = RemoveDoc(*op, file_contents, register_call_location);
file_contents_updated = true;
processed_ops.insert(op->name());
}
if (file_contents_updated) {
TF_CHECK_OK(WriteStringToFile(Env::Default(), file, file_contents))
<< "Could not remove .Doc calls in " << file
<< ". Make sure the file is writable.";
}
}
}
}
string CreateApiDef(const OpDef& op) {
ApiDefs api_defs;
FillBaseApiDef(api_defs.add_op(), op);
const std::vector<string> multi_line_fields = {"description"};
std::string new_api_defs_str;
::tensorflow::protobuf::TextFormat::PrintToString(api_defs,
&new_api_defs_str);
return PBTxtToMultiline(new_api_defs_str, multi_line_fields);
}
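// Illustrative sketch of the output (not part of the original file): for an
// op whose description is "line1\nline2", CreateApiDef yields pbtxt in which
// the "description" field uses the multi-line <<END ... END form, e.g.
//
//   op {
//     graph_op_name: "MyOp" # "MyOp" is a hypothetical name
//     description: <<END
//   line1
//   line2
//   END
//   }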
void CreateApiDefs(const OpList& ops, const string& api_def_dir,
const string& op_file_pattern) {
auto* excluded_ops = GetExcludedOps();
std::vector<const OpDef*> new_ops_with_docs;
for (const auto& op : ops.op()) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
string file_path =
io::JoinPath(tensorflow::string(api_def_dir), kApiDefFileFormat);
file_path = strings::Printf(file_path.c_str(), op.name().c_str());
if (!Env::Default()->FileExists(file_path).ok()) {
std::cout << "Creating ApiDef file " << file_path << std::endl;
const auto& api_def_text = CreateApiDef(op);
TF_CHECK_OK(WriteStringToFile(Env::Default(), file_path, api_def_text));
if (OpHasDocs(op)) {
new_ops_with_docs.push_back(&op);
}
}
}
if (!op_file_pattern.empty()) {
std::vector<string> op_files;
TF_CHECK_OK(Env::Default()->GetMatchingPaths(op_file_pattern, &op_files));
RemoveDocs(new_ops_with_docs, op_files);
}
}
} | #include "tensorflow/core/api_def/update_api_def.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(UpdateApiDefTest, TestRemoveDocSingleOp) {
const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op1.
Description
for Op1.
b : Description for b.
a: Description for a.
output: Description for output.
)doc");
)opdef";
const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
OpDef op;
protobuf::TextFormat::ParseFromString(op_text, &op);
EXPECT_EQ(op_def_text,
RemoveDoc(op, op_def_text_with_doc, /*start_location=*/0));
}
TEST(UpdateApiDefTest, TestRemoveDocMultipleOps) {
const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Doc(R"doc(
Summary for Op1.
)doc")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op2.
)doc");
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op3.
)doc");
)opdef";
const string op1_text = R"(
name: "Op1"
input_arg {
name: "a"
}
summary: "Summary for Op1."
)";
const string op2_text = R"(
name: "Op2"
input_arg {
name: "a"
}
summary: "Summary for Op2."
)";
const string op3_text = R"(
name: "Op3"
input_arg {
name: "c"
}
summary: "Summary for Op3."
)";
OpDef op1, op2, op3;
protobuf::TextFormat::ParseFromString(op1_text, &op1);
protobuf::TextFormat::ParseFromString(op2_text, &op2);
protobuf::TextFormat::ParseFromString(op3_text, &op3);
string updated_text =
RemoveDoc(op2, op_def_text_with_doc,
/*start_location=*/op_def_text_with_doc.find("Op2"));
EXPECT_EQ(string::npos, updated_text.find("Summary for Op2"));
EXPECT_NE(string::npos, updated_text.find("Summary for Op1"));
EXPECT_NE(string::npos, updated_text.find("Summary for Op3"));
updated_text = RemoveDoc(op3, updated_text,
/*start_location=*/updated_text.find("Op3"));
updated_text = RemoveDoc(op1, updated_text,
/*start_location=*/updated_text.find("Op1"));
EXPECT_EQ(op_def_text, updated_text);
}
TEST(UpdateApiDefTest, TestCreateApiDef) {
const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
OpDef op;
protobuf::TextFormat::ParseFromString(op_text, &op);
const string expected_api_def = R"(op {
graph_op_name: "Op1"
in_arg {
name: "a"
description: <<END
Description for a.
END
}
out_arg {
name: "output"
description: <<END
Description for output.
END
}
attr {
name: "b"
description: <<END
Description for b.
END
}
summary: "Summary for Op1."
description: <<END
Description
for Op1.
END
}
)";
EXPECT_EQ(expected_api_def, CreateApiDef(op));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/api_def/update_api_def.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/api_def/update_api_def_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
101f50fd-1f21-43e6-97f3-00b657bd3792 | cpp | tensorflow/tensorflow | exp | tensorflow/lite/kernels/exp.cc | tensorflow/lite/kernels/exp_test.cc | #include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace exp {
enum KernelType {
kReference,
};
struct ExpContext {
ExpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
struct OpData {
union {
int8_t lut_int8[LUTSize<int8_t>()];
int16_t lut_int16[LUTSize<int16_t>()];
};
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
ExpContext op_context(context, node);
const TfLiteTensor* input = op_context.input;
TfLiteTensor* output = op_context.output;
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims);
output->type = input->type;
if (input->type == kTfLiteInt8) {
LUTPopulate<int8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point, [](float value) { return std::exp(value); },
data->lut_int8);
} else if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
LUTPopulate<int16_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point, [](float value) { return std::exp(value); },
data->lut_int16);
}
return context->ResizeTensor(context, op_context.output, output_dims);
}
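// For the quantized paths, Prepare bakes the whole dequantize -> exp ->
// requantize pipeline into a lookup table.  Conceptually each entry is
//
//   q_out = zp_out + round(exp(scale_in * (q_in - zp_in)) / scale_out)
//
// clamped to the output type's range, so Eval reduces to one table lookup
// per element.  The int16 path additionally requires zero points of 0
// (symmetric quantization), as enforced above.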
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
ExpContext op_context(context, node);
if (kernel_type == kReference) {
switch (op_context.input->type) {
case kTfLiteFloat32:
reference_ops::Exp(GetTensorData<float>(op_context.input),
NumElements(op_context.input),
GetTensorData<float>(op_context.output));
break;
case kTfLiteInt8:
reference_integer_ops::LookupTable(
GetTensorData<int8_t>(op_context.input),
NumElements(op_context.input), data->lut_int8,
GetTensorData<int8_t>(op_context.output));
break;
case kTfLiteInt16:
reference_integer_ops::LookupTable(
GetTensorData<int16_t>(op_context.input),
NumElements(op_context.input), data->lut_int16,
GetTensorData<int16_t>(op_context.output));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by Exp.",
op_context.input->type);
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EXP_REF() {
static TfLiteRegistration r = {exp::Init, exp::Free, exp::Prepare,
exp::Eval<exp::kReference>};
return &r;
}
TfLiteRegistration* Register_EXP() { return Register_EXP_REF(); }
}
}
} | #include <math.h>
#include <initializer_list>
#include <limits>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class BaseExpOpModel : public SingleOpModel {
public:
BaseExpOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_EXP, BuiltinOptions_ExpOptions,
CreateExpOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int output_;
};
class FloatExpOpModel : public BaseExpOpModel {
public:
using BaseExpOpModel::BaseExpOpModel;
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
class QuantizedExpOpModel : public BaseExpOpModel {
public:
using BaseExpOpModel::BaseExpOpModel;
template <class T>
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename integer_dtype>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
GetScale(output_), GetZeroPoint(output_));
}
};
template <typename T>
inline float GetTolerance(float min, float max) {
float kQuantizedTolerance = (max - min) / (std::numeric_limits<T>::max() -
std::numeric_limits<T>::min());
if (std::is_same<T, int8_t>::value) {
kQuantizedTolerance += (max - min) / 256.0f;
} else if (std::is_same<T, int16_t>::value) {
kQuantizedTolerance += (max - min) / 512.0f;
}
return kQuantizedTolerance;
}
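// Worked example (illustrative): for int8 over [-3.1, 3.1] the base
// tolerance is 6.2 / 255 ~= 0.0243, plus the int8 slack 6.2 / 256 ~= 0.0242,
// so quantized outputs may differ from the float reference by roughly 0.049.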
TEST(ExpOpTest, ExpFloat) {
std::initializer_list<float> data = {0.0f, 1.0f, -1.0f, 100.0f,
-100.0f, 0.01f, -0.01f};
FloatExpOpModel m({TensorType_FLOAT32, {1, 1, 7}}, {TensorType_FLOAT32, {}});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 7}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{std::exp(0.0f), std::exp(1.0f), std::exp(-1.0f), std::exp(100.0f),
std::exp(-100.0f), std::exp(0.01f), std::exp(-0.01f)})));
}
template <TensorType tensor_type, typename integer_dtype>
void QuantizedExpSymmetricTest() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
const float kQuantizedTolerance = GetTolerance<integer_dtype>(-3.1, 3.1);
QuantizedExpOpModel m({tensor_type, {1, 2, 2, 2}, 1.3f * kMin, 1.3f * kMax},
{tensor_type, {}, 3.01f * kMin, 3.01f * kMax});
m.SetInput<integer_dtype>({-1.3, -1.0, -0.3, 0, 0.1, 0.5, 1.0, 1.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(
{0.2725, 0.3679, 0.7408, 1.0, 1.1052, 1.6487, 2.7183, 3.0042},
kQuantizedTolerance)));
}
TEST(ExpOpTest, ExpSymmetricInt8) {
QuantizedExpSymmetricTest<TensorType_INT8, int8_t>();
}
TEST(ExpOpTest, ExpSymmetricInt16) {
QuantizedExpSymmetricTest<TensorType_INT16, int16_t>();
}
template <TensorType tensor_type, typename integer_dtype>
void QuantizedExpAsymmetricTest() {
const float kQuantizedTolerance = GetTolerance<integer_dtype>(-1.3, 3.01);
QuantizedExpOpModel m({tensor_type, {1, 2, 2, 2}, -1.3, 1.1},
{tensor_type, {}, 0.0, 3.01});
m.SetInput<integer_dtype>({-1.3, -1.0, -0.3, 0, 0.1, 0.5, 1.0, 1.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(
{0.2725, 0.3679, 0.7408, 1.0, 1.1052, 1.6487, 2.7183, 3.0042},
kQuantizedTolerance)));
}
TEST(ExpOpTest, ExpAsymmetricInt8) {
QuantizedExpAsymmetricTest<TensorType_INT8, int8_t>();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/exp.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/exp_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb44438c-84cc-487a-a607-7f89b0335637 | cpp | tensorflow/tensorflow | mkl_util | tensorflow/core/util/mkl_util.h | tensorflow/core/util/mkl_util_test.cc | #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <list>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "dnnl.hpp"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/onednn_env_vars.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
#include "xla/tsl/util/onednn_threadpool.h"
using dnnl::engine;
using dnnl::memory;
using dnnl::primitive;
using dnnl::reorder;
using dnnl::stream;
using CPUDevice = Eigen::ThreadPoolDevice;
using MemoryArgsMap = std::unordered_map<int, memory>;
using ReorderPd = dnnl::reorder::primitive_desc;
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
typedef enum {
Dim3d_N = 0,
Dim3d_C = 1,
Dim3d_D = 2,
Dim3d_H = 3,
Dim3d_W = 4,
Dim3d_O = 0,
Dim3d_I = 1
} MklDnnDims3D;
typedef enum {
TF_2DFILTER_DIM_H = 0,
TF_2DFILTER_DIM_W = 1,
TF_2DFILTER_DIM_I = 2,
TF_2DFILTER_DIM_O = 3
} TFFilterDims2d;
typedef enum {
TF_3DFILTER_DIM_P = 0,
TF_3DFILTER_DIM_H = 1,
TF_3DFILTER_DIM_W = 2,
TF_3DFILTER_DIM_I = 3,
TF_3DFILTER_DIM_O = 4
} TFFilterDims3d;
typedef enum {
MKL_GROUP_FILTER_DIM_G = 0,
MKL_GROUP_FILTER_DIM_O = 1,
MKL_GROUP_FILTER_DIM_I = 2,
MKL_GROUP_FILTER_DIM_H = 3,
MKL_GROUP_FILTER_DIM_W = 4
} MklDnnFilterGroupDims;
enum class MklQuantization {
QUANTIZED_VERSION,
FP_VERSION,
};
static const int kSmallBatchSize = 32;
enum class OneDNNMathModeSetting {
kNone = 0,
kBF16,
};
inline OneDNNMathModeSetting SetFPMathMode() {
static OneDNNMathModeSetting math_mode = [] {
OneDNNMathModeSetting mode = OneDNNMathModeSetting::kNone;
if (FPMathModeSetting() == "BF16") {
if (dnnl::set_default_fpmath_mode(dnnl::fpmath_mode::bf16) ==
dnnl::status::success) {
mode = OneDNNMathModeSetting::kBF16;
}
}
return mode;
}();
return math_mode;
}
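// The fpmath mode is resolved once (note the initialize-once lambda) from
// the value returned by FPMathModeSetting(), declared in onednn_env_vars.h.
// That helper reads an environment variable (TF_SET_ONEDNN_FPMATH_MODE in
// current TensorFlow sources; the exact name is an assumption here), and a
// value of "BF16" downgrades f32 primitives to bf16 math where oneDNN
// supports it.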
inline void execute_primitives(
std::vector<dnnl::primitive>& primitives, std::shared_ptr<stream> stream,
std::vector<std::unordered_map<int, memory>>& net_args) {
DCHECK_EQ(primitives.size(), net_args.size());
for (size_t i = 0; i < primitives.size(); ++i) {
primitives.at(i).execute(*stream, net_args.at(i));
}
}
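// Typical call pattern (an illustrative sketch, not part of the original
// file; `reorder_prim`, `src_mem`, `dst_mem`, and `stream_ptr` are
// hypothetical): primitives and their argument maps are built up in
// lockstep and executed in order.
//
//   std::vector<dnnl::primitive> net;
//   std::vector<MemoryArgsMap> net_args;
//   net.push_back(reorder_prim);
//   net_args.push_back({{DNNL_ARG_FROM, src_mem}, {DNNL_ARG_TO, dst_mem}});
//   std::shared_ptr<stream> stream_ptr(CreateStream(nullptr, engine));
//   execute_primitives(net, stream_ptr, net_args);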
#ifndef ENABLE_ONEDNN_V3
#define ARE_MEMORY_DESCS_EQUAL(md1, md2) dnnl_memory_desc_equal(&md1, &md2)
#define CREATE_MEMORY_DESC_USING_STRIDES dnnl_memory_desc_init_by_strides
#define GET_DATA_TYPE data_type
#define GET_DIMS dims
#define GET_INNER_BLKS format_desc.blocking.inner_blks
#define GET_INNER_DIMS(dims, dims_1) dims_1
#define GET_INNER_IDXS format_desc.blocking.inner_idxs
#define GET_INNER_NBLKS format_desc.blocking.inner_nblks
#define GET_MEMORY_DESC get_desc().data
#define GET_MEMORY_DESC_FLAGS extra.flags
#define GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR GetMklLayout().data
#define GET_NDIMS ndims
#define GET_STRIDES format_desc.blocking.strides
#define GET_STRIDES_DIMS(dims, dims_outer_blocks) dims_outer_blocks
#define INIT_DIMS_FROM_DESC(in_dims, md) in_dims(md.dims, &md.dims[md.ndims])
#define MEMORY_DESC dnnl_memory_desc_t
#else
#define ARE_MEMORY_DESCS_EQUAL(md1, md2) md1 == md2
#define CREATE_MEMORY_DESC_USING_STRIDES dnnl_memory_desc_create_with_strides
#define GET_DATA_TYPE get_data_type()
#define GET_DIMS get_dims()
#define GET_INNER_BLKS get_inner_blks()
#define GET_INNER_DIMS(dims, dims_1) dims
#define GET_INNER_IDXS get_inner_idxs()
#define GET_INNER_NBLKS get_inner_nblks()
#define GET_MEMORY_DESC get_desc()
#define GET_MEMORY_DESC_FLAGS get_size()
#define GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR GetMklLayout()
#define GET_NDIMS get_ndims()
#define GET_STRIDES get_strides()
#define GET_STRIDES_DIMS(dims, dims_outer_blocks) dims
#define INIT_DIMS_FROM_DESC(in_dims, md) in_dims = md.get_dims()
#define MEMORY_DESC memory::desc
#endif
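// The macros above paper over the API break between oneDNN v2.x and v3.x:
// v2.x exposes memory descriptors as plain C structs with public fields
// (md.data.dims, md.data.ndims, ...), while v3.x replaces them with opaque
// objects queried through getters (md.get_dims(), md.get_ndims(), ...).
// Code below can therefore be written once against the macro names.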
enum class MklTensorFormat {
FORMAT_NHWC = 0,
FORMAT_NCHW = 1,
FORMAT_NDHWC = 2,
FORMAT_NCDHW = 3,
FORMAT_X = 4,
FORMAT_NC = 5,
FORMAT_TNC = 6,
FORMAT_BLOCKED = 7,
FORMAT_INVALID = 8,
};
memory::format_tag MklTensorFormatToMklDnnDataFormat(MklTensorFormat format);
TensorFormat MklDnn3DDataFormatToTFDataFormat(MklTensorFormat format);
TensorFormat MklDnnDataFormatToTFDataFormat(MklTensorFormat format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
Status CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype,
dnnl_memory_desc_t* blocked_md);
inline std::ostream& operator<<(std::ostream& os,
const memory::format_tag& tag) {
if (tag == memory::format_tag::undef) {
os << "undef";
} else if (tag == memory::format_tag::any) {
os << "any";
} else {
os << "invalid";
}
return os;
}
inline void operator<<(std::ostream& os, const MklTensorFormat& format) {
if (format == MklTensorFormat::FORMAT_NHWC) {
os << "FORMAT_NHWC";
} else if (format == MklTensorFormat::FORMAT_NCHW) {
os << "FORMAT_NCHW";
} else if (format == MklTensorFormat::FORMAT_NDHWC) {
os << "FORMAT_NDHWC";
} else if (format == MklTensorFormat::FORMAT_NCDHW) {
os << "FORMAT_NCDHW";
} else if (format == MklTensorFormat::FORMAT_X) {
os << "FORMAT_X";
} else if (format == MklTensorFormat::FORMAT_NC) {
os << "FORMAT_NC";
} else if (format == MklTensorFormat::FORMAT_TNC) {
os << "FORMAT_TNC";
} else if (format == MklTensorFormat::FORMAT_BLOCKED) {
os << "FORMAT_BLOCKED";
} else {
os << "INVALID FORMAT";
}
}
template <typename T>
inline bool array_cmp(const T* a1, const T* a2, size_t size) {
for (size_t i = 0; i < size; ++i)
if (a1[i] != a2[i]) return false;
return true;
}
inline dnnl::stream* CreateStream(tsl::OneDnnThreadPool* eigen_tp,
const engine& engine) {
#ifndef ENABLE_ONEDNN_OPENMP
if (eigen_tp != nullptr) {
stream* tp_stream =
new stream(dnnl::threadpool_interop::make_stream(engine, eigen_tp));
return tp_stream;
} else {
stream* tp_stream = new stream(engine);
return tp_stream;
}
#else
stream* tp_stream = new stream(engine);
return tp_stream;
#endif
}
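// Note: CreateStream returns an owning raw pointer in every branch, so the
// caller is responsible for deleting the stream (typically by wrapping it in
// a std::unique_ptr or std::shared_ptr at the call site, though that
// convention is an assumption here, not enforced by this header).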
class MklDnnShape {
private:
struct MklShapeData {
bool is_mkl_tensor_ = false;
size_t dimension_ = 0;
dnnl_dims_t sizes_;
MklTensorFormat tf_data_format_ = MklTensorFormat::FORMAT_BLOCKED;
memory::data_type T_ = memory::data_type::undef;
MEMORY_DESC mkl_md_;
dnnl_dims_t map_;
};
MklShapeData data_;
typedef std::remove_extent<dnnl_dims_t>::type dnnl_dim_t;
#define INVALID_DIM_SIZE -1
public:
MklDnnShape() : data_{} {
for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
++i) {
data_.sizes_[i] = -1;
}
for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
data_.map_[i] = -1;
}
}
~MklDnnShape() {}
MklDnnShape(const MklDnnShape&) = delete;
void operator=(const MklDnnShape&) = delete;
inline bool operator==(const MklDnnShape& input_shape) const {
if (this->IsMklTensor() != input_shape.IsMklTensor()) {
return false;
}
if (this->IsMklTensor()) {
auto const& cur_md = this->GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR;
auto const& input_shape_md =
input_shape.GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR;
return (this->GetTfShape() == input_shape.GetTfShape()) &&
ARE_MEMORY_DESCS_EQUAL(cur_md, input_shape_md);
}
return true;
}
inline bool operator==(const TensorShape& input_shape) const {
if (!this->IsMklTensor()) {
return false;
}
return this->GetTfShape() == input_shape;
}
inline bool IsMklTensor() const { return data_.is_mkl_tensor_; }
inline void SetMklTensor(bool is_mkl_tensor) {
data_.is_mkl_tensor_ = is_mkl_tensor;
}
inline void SetDimensions(const size_t dimension) {
data_.dimension_ = dimension;
}
inline size_t GetDimension(char dimension) const {
int index = GetMklDnnTensorDimIndex(dimension);
CHECK(index >= 0 && index < this->GetDimension())
    << "Invalid index " << index << " for dimension '" << dimension << "'";
return this->DimSize(index);
}
inline size_t GetDimension3D(char dimension) const {
int index = GetMklDnnTensor3DDimIndex(dimension);
CHECK(index >= 0 && index < this->GetDimension())
    << "Invalid index " << index << " for dimension '" << dimension << "'";
return this->DimSize(index);
}
inline int32 GetMklDnnTensorDimIndex(char dimension) const {
switch (dimension) {
case 'N':
return MklDnnDims::Dim_N;
case 'C':
return MklDnnDims::Dim_C;
case 'H':
return MklDnnDims::Dim_H;
case 'W':
return MklDnnDims::Dim_W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
}
inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
switch (dimension) {
case 'N':
return MklDnnDims3D::Dim3d_N;
case 'C':
return MklDnnDims3D::Dim3d_C;
case 'D':
return MklDnnDims3D::Dim3d_D;
case 'H':
return MklDnnDims3D::Dim3d_H;
case 'W':
return MklDnnDims3D::Dim3d_W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
}
inline size_t GetDimension() const { return data_.dimension_; }
inline const int* GetSizes() const {
return reinterpret_cast<const int*>(&data_.sizes_[0]);
}
inline memory::dims GetSizesAsMklDnnDims() const {
memory::dims retVal;
if (data_.is_mkl_tensor_) {
size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
for (size_t i = 0; i < dimensions; i++) {
if (data_.sizes_[i] != INVALID_DIM_SIZE)
retVal.push_back(data_.sizes_[i]);
}
} else {
CHECK_EQ(data_.is_mkl_tensor_, true);
}
return retVal;
}
inline int64 DimSize(int index) const {
CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
return data_.sizes_[index];
}
inline TensorShape GetTfShape() const {
CHECK_EQ(data_.is_mkl_tensor_, true);
std::vector<int32> shape(data_.dimension_, -1);
if (data_.tf_data_format_ != MklTensorFormat::FORMAT_BLOCKED) {
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[TfDimIdx(idx)];
}
} else {
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[idx];
}
}
TensorShape ts;
bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
CHECK_EQ(ret, true);
return ts;
}
inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
inline memory::data_type GetElemType() const { return data_.T_; }
#ifndef ENABLE_ONEDNN_V3
inline void SetMklLayout(memory::desc* md) {
CHECK_NOTNULL(md);
data_.mkl_md_ = md->data;
}
#else
inline void SetMklLayout(const memory::desc& md) { data_.mkl_md_ = md; }
#endif
inline const memory::desc GetMklLayout() const {
return memory::desc(data_.mkl_md_);
}
inline MklTensorFormat GetTfDataFormat() const {
return data_.tf_data_format_;
}
inline void SetTfLayout(size_t dims, const memory::dims& sizes,
MklTensorFormat format) {
DCHECK_EQ(dims, sizes.size())
<< "SetTfLayout: Number of dimensions does not"
"match with dimension array";
data_.dimension_ = dims;
for (size_t ii = 0; ii < dims; ++ii) {
data_.sizes_[ii] = sizes[ii];
}
data_.tf_data_format_ = format;
if (format != MklTensorFormat::FORMAT_BLOCKED) {
if (dims == 2) {
data_.map_[0] = MklDnnDims::Dim_N;
data_.map_[1] = MklDnnDims::Dim_C;
} else {
SetTfDimOrder(dims, format);
}
}
}
inline const memory::desc GetTfLayout() const {
memory::dims dims;
for (size_t ii = 0; ii < data_.dimension_; ++ii) {
dims.push_back(data_.sizes_[ii]);
}
if (data_.tf_data_format_ == MklTensorFormat::FORMAT_BLOCKED) {
auto strides = CalculateTFStrides(dims);
dnnl_memory_desc_t blocked_md;
TF_CHECK_OK(
CreateBlockedMemDescHelper(dims, strides, data_.T_, &blocked_md));
return memory::desc(blocked_md);
} else {
auto format_tag =
MklTensorFormatToMklDnnDataFormat(data_.tf_data_format_);
return memory::desc(dims, data_.T_, format_tag);
}
}
inline const memory::desc GetCurLayout() const {
return IsMklTensor() ? GetMklLayout() : GetTfLayout();
}
inline void SetTfDimOrder(const size_t dimension, const dnnl_dims_t map) {
CHECK(dimension == data_.dimension_);
for (size_t ii = 0; ii < dimension; ii++) {
data_.map_[ii] = map[ii];
}
}
inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
if (dimension == 5) {
CHECK(dimension == data_.dimension_);
data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
MklDnnDims3D::Dim3d_D;
data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
MklDnnDims3D::Dim3d_H;
data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
MklDnnDims3D::Dim3d_W;
data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
MklDnnDims3D::Dim3d_C;
data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
MklDnnDims3D::Dim3d_N;
} else {
CHECK_EQ(dimension, 4);
CHECK(dimension == data_.dimension_);
data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
}
}
inline void SetTfDimOrder(const size_t dimension, MklTensorFormat format) {
TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
SetTfDimOrder(dimension, data_format);
}
inline const dnnl_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
inline int64 TfDimSize(int index) const {
return data_.sizes_[TfDimIdx(index)];
}
inline bool IsMklChannelDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_C;
}
inline bool IsMklBatchDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_N;
}
inline bool IsMklWidthDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_W;
}
inline bool IsMklHeightDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_H;
}
inline bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
inline bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }
void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small to SerializeMklDnnShape";
*reinterpret_cast<MklShapeData*>(buf) = data_;
}
void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
<< "Buffer size is too small in DeSerializeMklDnnShape";
const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
if (is_mkl_tensor) {
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small in DeSerializeMklDnnShape";
data_ = *reinterpret_cast<const MklShapeData*>(buf);
}
}
};
inline Eigen::ThreadPoolInterface* EigenThreadPoolFromTfContext(
OpKernelContext* context) {
return context->device()
->tensorflow_cpu_worker_threads()
->workers->AsEigenThreadPool();
}
typedef std::vector<MklDnnShape> MklDnnShapeList;
template <typename T>
class MklDnnData;
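// Executes the given primitives with their argument maps on a freshly
// created stream (threadpool-backed when an op kernel context is available)
// and blocks until all of them finish.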
inline void ExecutePrimitive(const std::vector<primitive>& net,
const std::vector<MemoryArgsMap>* net_args,
const engine& cpu_engine,
OpKernelContext* context = nullptr) {
DCHECK(net_args);
DCHECK_EQ(net.size(), net_args->size());
std::unique_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (context != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, cpu_engine));
} else {
cpu_stream.reset(CreateStream(nullptr, cpu_engine));
}
for (size_t i = 0; i < net.size(); ++i) {
net.at(i).execute(*cpu_stream, net_args->at(i));
}
cpu_stream->wait();
}
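// Converts a tensor in MKL layout back to plain TF layout, reordering
// through oneDNN when the layouts differ and forwarding the buffer when they
// already match.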
template <typename T>
inline Status ConvertMklToTF(OpKernelContext* context,
const Tensor& input_mkl_tensor,
const MklDnnShape& input_mkl_shape,
Tensor* output_tf_tensor) {
try {
if (!input_mkl_shape.IsMklTensor()) {
*output_tf_tensor = input_mkl_tensor;
return OkStatus();
}
TensorShape output_tf_shape = input_mkl_shape.GetTfShape();
TF_CHECK_OK(context->allocate_temp(DataTypeToEnum<T>::v(), output_tf_shape,
output_tf_tensor));
engine cpu_engine(engine::kind::cpu, 0);
MklDnnData<T> input(&cpu_engine);
auto input_mkl_md = input_mkl_shape.GetMklLayout();
auto output_tf_md = input_mkl_shape.GetTfLayout();
input.SetUsrMem(input_mkl_md, &input_mkl_tensor);
if (input.IsReorderNeeded(output_tf_md)) {
std::vector<primitive> net;
std::vector<MemoryArgsMap> net_args;
bool status = input.CheckReorderToOpMem(output_tf_md, output_tf_tensor,
net, net_args, cpu_engine);
if (!status) {
return absl::InternalError(
"ConvertMklToTF(): Failed to create reorder for input");
}
ExecutePrimitive(net, &net_args, cpu_engine, context);
} else {
bool status =
output_tf_tensor->CopyFrom(input_mkl_tensor, output_tf_shape);
if (!status) {
return absl::InternalError(
"ConvertMklToTF(): Failed to forward input tensor to output");
}
}
return OkStatus();
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
LOG(FATAL) << "Operation received an exception: " << error_msg;
}
}
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape,
bool eager_mode) {
if (!eager_mode) {
mklshape->DeSerializeMklDnnShape(
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.data(),
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.size() *
sizeof(uint8));
} else {
mklshape->SetMklTensor(false);
}
}
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
GetMklShape(ctext, n, mklshape, false);
}
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
OpInputList* input_tensors) {
CHECK_NOTNULL(input_tensors);
TF_CHECK_OK(ctext->input_list(name, input_tensors));
}
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
MklDnnShapeList* mkl_shapes,
bool native_format = false) {
if (!native_format) {
OpInputList input_mkl_tensors;
GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);
for (int i = 0; i < input_mkl_tensors.size(); i++) {
(*mkl_shapes)[i].DeSerializeMklDnnShape(
input_mkl_tensors[i].flat<uint8>().data(),
input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
}
} else {
for (int i = 0; i < mkl_shapes->size(); ++i) {
(*mkl_shapes)[i].SetMklTensor(false);
}
}
}
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx,
bool eager_mode = false) {
CHECK_NOTNULL(context);
CHECK_LT(input_idx, context->num_inputs());
MklDnnShape input_mkl_shape;
GetMklShape(context, input_idx, &input_mkl_shape, eager_mode);
if (input_mkl_shape.IsMklTensor() && !eager_mode) {
return input_mkl_shape.GetTfShape();
} else {
const Tensor& t = MklGetInput(context, input_idx);
return t.shape();
}
}
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
const MklDnnShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
Tensor** output,
const TensorShape& tf_shape,
const MklDnnShape& mkl_shape,
bool eager_mode = false) {
OP_REQUIRES_OK(
ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
tf_shape, output));
if (!eager_mode) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
}
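// Allocates a temporary tensor large enough to back the given oneDNN memory
// descriptor; a second overload below allocates by TF shape instead.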
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
const memory::desc& pd, void** buf_out) {
TensorShape tf_shape;
tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
*buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
TensorShape tf_shape) {
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
}
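// User-managed scratchpad: allocates a TF temp tensor sized to a primitive's
// scratchpad descriptor so oneDNN need not allocate internally. Typical use
// (sketch): call AllocateSPTensor(prim, ctx), then pass Get() as the
// DNNL_ARG_SCRATCHPAD data handle.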
template <typename T>
struct UserScratchPad {
template <typename MklPrim>
inline void AllocateSPTensor(MklPrim* mkl_prim, OpKernelContext* context) {
allocated_ = false;
auto spad_md = mkl_prim->GetScratchPadDesc();
size_t spad_size = spad_md.get_size();
if (spad_size == 0) return;
size_t allocate_size = (spad_size + sizeof(T) - 1) / sizeof(T);
TensorShape tf_shape;
tf_shape.AddDim(allocate_size);
AllocTmpBuffer<T>(context, &scratch_pad_, tf_shape);
allocated_ = true;
}
inline void* Get() {
if (allocated_) {
return static_cast<void*>(scratch_pad_.flat<T>().data());
} else {
return nullptr;
}
}
private:
Tensor scratch_pad_;
bool allocated_ = false;
};
inline void GetStridesFromSizes(MklTensorFormat data_format, size_t* strides,
const size_t* sizes) {
DCHECK_NE(data_format, MklTensorFormat::FORMAT_INVALID);
if (data_format == MklTensorFormat::FORMAT_NHWC) {
strides[0] = sizes[2];
strides[1] = sizes[0] * sizes[2];
strides[2] = 1;
strides[3] = sizes[0] * sizes[1] * sizes[2];
} else {
strides[0] = 1;
strides[1] = sizes[0];
strides[2] = sizes[0] * sizes[1];
strides[3] = sizes[0] * sizes[1] * sizes[2];
}
}
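// Helpers for moving data/meta tensor pairs between op inputs and outputs
// under the MKL layout-passing convention (data tensor at
// GetTensorDataIndex, serialized MklDnnShape at GetTensorMetaDataIndex).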
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
const Tensor& meta = context->input(idx_meta_in);
Tensor output(data.dtype());
Tensor meta_output(meta.dtype());
CHECK(output.CopyFrom(data, data.shape()));
CHECK(meta_output.CopyFrom(meta, meta.shape()));
context->set_output(idx_data_out, output);
context->set_output(idx_meta_out, meta_output);
}
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
int idx_out,
const TensorShape& shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
Tensor output(data.dtype());
CHECK(output.CopyFrom(data, shape));
context->set_output(idx_data_out, output);
}
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
MklDnnShape dnn_shape_output;
dnn_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
inline bool ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
int idx_in, int idx_out,
Tensor** output,
const MklDnnShape& mkl_shape,
bool always_forward = true) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
bool is_forwarded = false;
const Tensor& input_tensor = context->input(idx_data_in);
const auto output_shape = input_tensor.shape();
if (always_forward) {
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, input_tensor);
}
} else {
is_forwarded = context->forward_input_to_output_with_shape(
idx_data_in, idx_data_out, output_shape, output);
}
if (is_forwarded || always_forward) {
AllocateOutputSetMklShape(context, idx_out, mkl_shape);
return true;
}
return false;
}
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
uint32 idx_data_in,
uint32_t idx_data_out) {
uint32 idx_meta_in =
GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
uint32 idx_meta_out =
GetTensorMetaDataIndex(idx_data_out, context->num_outputs());
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
inline Tensor GetMklMetaTensor() {
MklDnnShape non_mkl_shape;
non_mkl_shape.SetMklTensor(false);
auto size = static_cast<int64_t>(non_mkl_shape.GetSerializeBufferSize());
Tensor tensor(DT_UINT8, {size});
non_mkl_shape.SerializeMklDnnShape(tensor.flat<uint8>().data(),
size * sizeof(uint8));
return tensor;
}
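// Maps C++ element types to oneDNN data types; specialized below for every
// type the MKL kernels support.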
template <typename T>
static memory::data_type MklDnnType();
template <>
memory::data_type MklDnnType<float>() {
return memory::data_type::f32;
}
template <>
memory::data_type MklDnnType<quint8>() {
return memory::data_type::u8;
}
template <>
memory::data_type MklDnnType<uint8>() {
return memory::data_type::u8;
}
template <>
memory::data_type MklDnnType<qint8>() {
return memory::data_type::s8;
}
template <>
memory::data_type MklDnnType<qint32>() {
return memory::data_type::s32;
}
template <>
memory::data_type MklDnnType<bfloat16>() {
return memory::data_type::bf16;
}
template <>
memory::data_type MklDnnType<Eigen::half>() {
return memory::data_type::f16;
}
inline memory::format_tag MklTensorFormatToMklDnnDataFormat(
MklTensorFormat format) {
if (format == MklTensorFormat::FORMAT_NHWC) return memory::format_tag::nhwc;
if (format == MklTensorFormat::FORMAT_NCHW) return memory::format_tag::nchw;
if (format == MklTensorFormat::FORMAT_NDHWC) return memory::format_tag::ndhwc;
if (format == MklTensorFormat::FORMAT_NCDHW) return memory::format_tag::ncdhw;
if (format == MklTensorFormat::FORMAT_X) return memory::format_tag::x;
if (format == MklTensorFormat::FORMAT_NC) return memory::format_tag::nc;
if (format == MklTensorFormat::FORMAT_TNC) return memory::format_tag::tnc;
return memory::format_tag::undef;
}
inline MklTensorFormat TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
if (format == FORMAT_NHWC) return MklTensorFormat::FORMAT_NDHWC;
if (format == FORMAT_NCHW) return MklTensorFormat::FORMAT_NCDHW;
TF_CHECK_OK(absl::InvalidArgumentError("Unsupported data format"));
return MklTensorFormat::FORMAT_INVALID;
}
inline MklTensorFormat TFDataFormatToMklDnnDataFormat(TensorFormat format) {
if (format == FORMAT_NHWC) return MklTensorFormat::FORMAT_NHWC;
if (format == FORMAT_NCHW) return MklTensorFormat::FORMAT_NCHW;
TF_CHECK_OK(absl::InvalidArgumentError("Unsupported data format"));
return MklTensorFormat::FORMAT_INVALID;
}
inline TensorFormat MklDnnDataFormatToTFDataFormat(MklTensorFormat format) {
if (format == MklTensorFormat::FORMAT_NHWC ||
format == MklTensorFormat::FORMAT_NDHWC)
return FORMAT_NHWC;
if (format == MklTensorFormat::FORMAT_NCHW ||
format == MklTensorFormat::FORMAT_NCDHW)
return FORMAT_NCHW;
TF_CHECK_OK(absl::InvalidArgumentError("Unsupported data format"));
return FORMAT_NHWC;
}
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
memory::dims dims(shape.dims());
for (int d = 0; d < shape.dims(); ++d) {
dims[d] = shape.dim_size(d);
}
return dims;
}
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnnDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
int w = shape.dim_size(GetTensorDimIndex(format, 'W'));
return memory::dims({n, c, h, w});
}
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
int c = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
int d = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
int h = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
int w = shape.dim_size(GetTensorDimIndex<3>(format, '2'));
return memory::dims({n, c, d, h, w});
}
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnnDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = in_dims[GetTensorDimIndex(format, 'N')];
int c = in_dims[GetTensorDimIndex(format, 'C')];
int h = in_dims[GetTensorDimIndex(format, 'H')];
int w = in_dims[GetTensorDimIndex(format, 'W')];
return memory::dims({n, c, h, w});
}
inline memory::dims MklDnnDimsInNCDHW(const memory::dims& in_dims,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnnDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = in_dims[GetTensorDimIndex<3>(format, 'N')];
int c = in_dims[GetTensorDimIndex<3>(format, 'C')];
int d = in_dims[GetTensorDimIndex<3>(format, '0')];
int h = in_dims[GetTensorDimIndex<3>(format, '1')];
int w = in_dims[GetTensorDimIndex<3>(format, '2')];
return memory::dims({n, c, d, h, w});
}
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
std::vector<int32> shape(dims.size(), -1);
for (int d = 0; d < dims.size(); d++) {
shape[d] = dims[d];
}
TensorShape ret;
CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
return ret;
}
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
CHECK_GT(dims_tf_order.size(), 0);
memory::dims strides(dims_tf_order.size());
int last_dim_idx = dims_tf_order.size() - 1;
strides[last_dim_idx] = 1;
for (int d = last_dim_idx - 1; d >= 0; d--) {
strides[d] = strides[d + 1] * dims_tf_order[d + 1];
}
return strides;
}
inline Status CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype,
dnnl_memory_desc_t* blocked_md) {
DCHECK_EQ(dim.size(), strides.size());
const int kNumDims = dim.size();
dnnl_dim_t* input_dims = new dnnl_dim_t[kNumDims];
dnnl_dim_t* input_strides = new dnnl_dim_t[kNumDims];
for (int i = 0; i < kNumDims; ++i) {
input_dims[i] = dim[i];
input_strides[i] = strides[i];
}
try {
CREATE_MEMORY_DESC_USING_STRIDES(blocked_md, kNumDims, input_dims,
memory::convert_to_c(dtype),
input_strides);
delete[] input_dims;
delete[] input_strides;
} catch (dnnl::error& e) {
delete[] input_dims;
delete[] input_strides;
return absl::InternalError(
absl::StrCat("Failed to create blocked memory descriptor.",
"Status: ", e.status, ", message: ", e.message));
}
return OkStatus();
}
inline void CreateAndExecuteReorder(const ReorderPd& reorder_desc,
const memory& src_mem,
const memory& dst_mem, const engine& engine,
OpKernelContext* ctx = nullptr,
memory* scale_mem = nullptr) {
std::vector<primitive> net;
net.push_back(dnnl::reorder(reorder_desc));
std::vector<MemoryArgsMap> net_args;
#ifndef ENABLE_ONEDNN_V3
net_args.push_back({{DNNL_ARG_FROM, src_mem}, {DNNL_ARG_TO, dst_mem}});
#else
if (scale_mem != nullptr) {
net_args.push_back({{DNNL_ARG_FROM, src_mem},
{DNNL_ARG_TO, dst_mem},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, *scale_mem}});
} else {
net_args.push_back({{DNNL_ARG_FROM, src_mem}, {DNNL_ARG_TO, dst_mem}});
}
#endif
ExecutePrimitive(net, &net_args, engine, ctx);
}
class MklReorderPrimitive;
template <typename T>
inline MklReorderPrimitive* FindOrCreateReorder(const memory* from,
const memory* to);
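// Wraps one operand of a oneDNN primitive: the memory object in the user's
// layout, an optional reorder target in the layout the primitive prefers,
// and helpers that create and run that reorder when the two layouts differ.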
template <typename T>
class MklDnnData {
private:
memory* user_memory_;
memory* reorder_memory_;
memory::desc* op_md_;
bool bIs3D;
void* allocated_buffer_;
const engine* cpu_engine_;
public:
explicit MklDnnData(const engine* e)
: user_memory_(nullptr),
reorder_memory_(nullptr),
op_md_(nullptr),
bIs3D(false),
allocated_buffer_(nullptr),
cpu_engine_(e) {}
MklDnnData(const MklDnnData&) = default;
MklDnnData& operator=(const MklDnnData&) = delete;
~MklDnnData() {
if (allocated_buffer_ != nullptr) {
cpu_allocator()->DeallocateRaw(allocated_buffer_);
}
cpu_engine_ = nullptr;
delete (user_memory_);
delete (reorder_memory_);
delete (op_md_);
}
inline void* GetTensorBuffer(const Tensor* tensor) const {
CHECK_NOTNULL(tensor);
return const_cast<void*>(
static_cast<const void*>(tensor->flat<T>().data()));
}
void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }
bool GetIs3D() { return bIs3D; }
inline void SetUsrMem(const memory::dims& dim, memory::format_tag fm,
void* data_buffer = nullptr) {
auto md = memory::desc(dim, MklDnnType<T>(), fm);
SetUsrMem(md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, memory::format_tag fm,
const Tensor* tensor) {
DCHECK(tensor);
SetUsrMem(dim, fm, GetTensorBuffer(tensor));
}
static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
const memory::dims& strides) {
dnnl_memory_desc_t blocked_md;
TF_CHECK_OK(
CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>(), &blocked_md));
return memory::desc(blocked_md);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
void* data_buffer = nullptr) {
CHECK_EQ(dim.size(), strides.size());
auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
SetUsrMem(blocked_md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, strides, GetTensorBuffer(tensor));
}
inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(md, GetTensorBuffer(tensor));
}
inline void SetUsrMem(const memory::desc& pd, void* data_buffer = nullptr) {
DCHECK(cpu_engine_);
if (user_memory_) delete user_memory_;
if (data_buffer) {
user_memory_ = new memory(pd, *cpu_engine_, data_buffer);
} else {
user_memory_ = new memory(pd, *cpu_engine_);
}
}
inline const memory* GetUsrMem() const { return user_memory_; }
inline memory::desc GetUsrMemDesc() const {
DCHECK(user_memory_);
return user_memory_->get_desc();
}
inline void* GetUsrMemDataHandle() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_data_handle();
}
inline void SetUsrMemDataHandle(void* data_buffer,
std::shared_ptr<stream> t_stream = nullptr) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(data_buffer);
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
user_memory_->set_data_handle(data_buffer, *t_stream);
#else
user_memory_->set_data_handle(data_buffer);
#endif
}
inline void SetUsrMemDataHandle(const Tensor* tensor,
std::shared_ptr<stream> t_stream = nullptr) {
SetUsrMemDataHandle(GetTensorBuffer(tensor), t_stream);
}
inline void AllocateBuffer(size_t size) {
const int64 kMemoryAlignment = 64;
allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlignment, size);
}
inline void* GetAllocatedBuffer() { return allocated_buffer_; }
inline const memory& GetOpMem() const {
return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}
inline void SetOpMemDesc(const memory::dims& dim, memory::format_tag fm) {
op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
}
inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
inline bool IsReorderNeeded(const memory::desc& op_pd) const {
DCHECK(user_memory_);
return op_pd != user_memory_->get_desc();
}
inline primitive CreateReorder(const memory* from, const memory* to) const {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
return reorder(*from, *to);
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args,
const engine& engine) {
DCHECK(user_memory_);
DCHECK_EQ(net.size(), net_args.size());
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine);
net.push_back(CreateReorder(user_memory_, reorder_memory_));
net_args.push_back(MemoryArgsMap{{DNNL_ARG_FROM, *user_memory_},
{DNNL_ARG_TO, *reorder_memory_}});
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
const engine& engine,
OpKernelContext* context = nullptr) {
DCHECK(user_memory_);
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine);
auto* prim = FindOrCreateReorder<T>(user_memory_, reorder_memory_);
std::shared_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (context != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, prim->GetEngine()));
} else {
cpu_stream.reset(CreateStream(nullptr, prim->GetEngine()));
}
std::vector<primitive> net;
net.push_back(*(prim->GetPrimitive()));
std::vector<MemoryArgsMap> net_args;
net_args.push_back(
{{DNNL_ARG_FROM, *user_memory_}, {DNNL_ARG_TO, *reorder_memory_}});
execute_primitives(net, cpu_stream, net_args);
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
void* reorder_data_handle,
std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args,
const engine& engine) {
DCHECK(reorder_data_handle);
DCHECK(user_memory_);
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine, reorder_data_handle);
net.push_back(CreateReorder(user_memory_, reorder_memory_));
net_args.push_back(MemoryArgsMap{{DNNL_ARG_FROM, *user_memory_},
{DNNL_ARG_TO, *reorder_memory_}});
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
void* reorder_data_handle,
const engine& engine,
OpKernelContext* context = nullptr) {
DCHECK(reorder_data_handle);
DCHECK(user_memory_);
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine, reorder_data_handle);
auto* prim = FindOrCreateReorder<T>(user_memory_, reorder_memory_);
std::shared_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (context != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, prim->GetEngine()));
} else {
cpu_stream.reset(CreateStream(nullptr, prim->GetEngine()));
}
std::vector<primitive> net;
net.push_back(*(prim->GetPrimitive()));
std::vector<MemoryArgsMap> net_args;
net_args.push_back(
{{DNNL_ARG_FROM, *user_memory_}, {DNNL_ARG_TO, *reorder_memory_}});
execute_primitives(net, cpu_stream, net_args);
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
Tensor* reorder_tensor,
std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args,
const engine& engine) {
DCHECK(reorder_tensor);
return CheckReorderToOpMem(op_md, GetTensorBuffer(reorder_tensor), net,
net_args, engine);
}
inline bool CheckReorderToOpMem(const memory::desc& op_pd,
Tensor* reorder_tensor,
OpKernelContext* ctx = nullptr) {
DCHECK(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor),
*cpu_engine_, ctx);
}
inline bool PrepareReorderToUserMemIfReq(const memory::desc& op_pd) {
DCHECK(user_memory_);
if (IsReorderNeeded(op_pd)) {
reorder_memory_ = new memory(op_pd, *cpu_engine_);
return true;
}
return false;
}
inline void InsertReorderToUserMem(std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args) {
DCHECK(user_memory_);
DCHECK(reorder_memory_);
net.push_back(CreateReorder(reorder_memory_, user_memory_));
net_args.push_back(MemoryArgsMap{{DNNL_ARG_FROM, *reorder_memory_},
{DNNL_ARG_TO, *user_memory_}});
}
inline void InsertReorderToUserMem(OpKernelContext* ctx = nullptr) {
DCHECK(user_memory_);
DCHECK(reorder_memory_);
DCHECK(cpu_engine_);
std::vector<primitive> net;
auto* prim = FindOrCreateReorder<T>(reorder_memory_, user_memory_);
net.push_back(*(prim->GetPrimitive()));
std::vector<MemoryArgsMap> net_args;
net_args.push_back(
{{DNNL_ARG_FROM, *reorder_memory_}, {DNNL_ARG_TO, *user_memory_}});
std::shared_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (ctx != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(ctx);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, prim->GetEngine()));
} else {
cpu_stream.reset(CreateStream(nullptr, prim->GetEngine()));
}
execute_primitives(net, cpu_stream, net_args);
}
};
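// Base class for cached oneDNN primitive wrappers; holds the CPU engine and
// a dummy data pointer used when constructing placeholder memory objects.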
class MklPrimitive {
public:
virtual ~MklPrimitive() {}
MklPrimitive() {}
MklPrimitive(const engine& cpu_engine) { cpu_engine_ = cpu_engine; }
unsigned char* DummyData = nullptr;
engine cpu_engine_ = engine(engine::kind::cpu, 0);
const engine& GetEngine() { return cpu_engine_; }
};
const dnnl::memory::dims NONE_DIMS = {};
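// String-keyed LRU cache. Entries own their T* and delete it on eviction.
// On AArch64/ACL builds with OpenMP the cache is shared across threads, so
// accesses are mutex-guarded and in-flight allocations are tracked.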
template <typename T>
class LRUCache {
public:
explicit LRUCache(size_t capacity) {
capacity_ = capacity;
Clear();
}
T* GetOp(const string& key) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(lru_mu_);
#endif
auto it = cache_.find(key);
if (it == cache_.end()) {
return nullptr;
}
lru_list_.erase(it->second.lru_iterator);
lru_list_.push_front(it->first);
it->second.lru_iterator = lru_list_.begin();
return it->second.op;
}
void SetOp(const string& key, T* op) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(lru_mu_);
#endif
if (lru_list_.size() >= capacity_) {
Delete();
}
lru_list_.push_front(key);
Entry entry(op, lru_list_.begin());
cache_.emplace(std::make_pair(key, std::move(entry)));
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
FinishedAllocation(key);
#endif
}
void Clear() {
if (lru_list_.empty()) return;
cache_.clear();
lru_list_.clear();
}
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
bool IsAllocating(const string& key) {
mutex_lock lock(in_flight_mu_);
return in_flight_.find(key) != in_flight_.end();
}
void Allocate(const string& key) {
mutex_lock lock(in_flight_mu_);
in_flight_.insert(key);
}
void FinishedAllocation(const string& key) {
mutex_lock lock(in_flight_mu_);
in_flight_.erase(key);
}
#endif
private:
struct Entry {
T* op;
std::list<string>::iterator lru_iterator;
Entry(T* op, std::list<string>::iterator it) {
this->op = op;
this->lru_iterator = it;
}
Entry(Entry&& source) noexcept
: lru_iterator(std::move(source.lru_iterator)) {
op = std::move(source.op);
source.op = nullptr;
}
~Entry() {
if (op != nullptr) delete op;
}
};
bool Delete() {
if (lru_list_.empty()) return false;
string key = lru_list_.back();
lru_list_.pop_back();
cache_.erase(key);
return true;
}
size_t capacity_;
std::unordered_map<string, Entry> cache_;
std::list<string> lru_list_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex lru_mu_;
std::set<string> in_flight_ TF_GUARDED_BY(in_flight_mu_);
mutex in_flight_mu_;
#endif
};
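// Primitive factory backed by an LRU cache that is thread-local, except on
// AArch64/ACL builds with OpenMP where a single process-wide cache is shared
// under a mutex and duplicate in-flight creations are coordinated.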
template <typename T>
class MklPrimitiveFactory {
public:
MklPrimitiveFactory() {}
~MklPrimitiveFactory() {}
MklPrimitive* GetOp(const string& key) {
#if !defined(DNNL_AARCH64_USE_ACL) || !defined(ENABLE_ONEDNN_OPENMP)
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
return lru_cache.GetOp(key);
#else
while (true) {
mutex_lock lock(primitive_creation_mu_);
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
MklPrimitive* primitive = lru_cache.GetOp(key);
if (primitive != nullptr) {
return primitive;
}
if (!lru_cache.IsAllocating(key)) {
lru_cache.Allocate(key);
return nullptr;
}
primitive_creation_cv_.wait(lock);
}
#endif
}
void SetOp(const string& key, MklPrimitive* op) {
#if !defined(DNNL_AARCH64_USE_ACL) || !defined(ENABLE_ONEDNN_OPENMP)
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
lru_cache.SetOp(key, op);
#else
{
mutex_lock lock(primitive_creation_mu_);
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
lru_cache.SetOp(key, op);
}
primitive_creation_cv_.notify_all();
#endif
}
static inline bool IsLegacyPlatform() {
#ifdef DNNL_AARCH64_USE_ACL
return false;
#else
static const bool is_legacy_platform =
(!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
!port::TestCPUFeature(port::CPUFeature::AVX2));
return is_legacy_platform;
#endif
}
static inline bool IsPrimitiveMemOptEnabled() {
static const bool is_primitive_mem_opt_enabled = [] {
bool value = true;
TF_CHECK_OK(
ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true, &value));
return value;
}();
return is_primitive_mem_opt_enabled;
}
#ifdef DNNL_AARCH64_USE_ACL
static int IncrementCounter() {
static std::atomic_int counter{1};
return counter.fetch_add(1);
}
#endif
private:
static inline LRUCache<MklPrimitive>& GetLRUCache() {
static const int kCapacity = 1024;
#if !defined(DNNL_AARCH64_USE_ACL) || !defined(ENABLE_ONEDNN_OPENMP)
static thread_local LRUCache<MklPrimitive> lru_cache_(kCapacity);
#else
static LRUCache<MklPrimitive> lru_cache_(kCapacity);
#endif
return lru_cache_;
}
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_creation_mu_;
condition_variable primitive_creation_cv_;
#endif
};
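// Builds primitive cache keys by appending the raw bytes of each value,
// delimited by 'x'. Usage sketch (hypothetical values):
//   FactoryKeyCreator key_creator;
//   key_creator.AddAsKey(string("conv"));
//   key_creator.AddAsKey(memory::dims({1, 2, 3, 4}));
//   string key = key_creator.GetKey();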
class FactoryKeyCreator {
public:
FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }
~FactoryKeyCreator() {}
void AddAsKey(const string& str) { Append(str); }
void AddAsKey(const dnnl::memory::dims& dims) {
for (unsigned int i = 0; i < dims.size(); i++) {
AddAsKey<int>(dims[i]);
}
}
template <typename T>
void AddAsKey(const T data) {
auto buffer = reinterpret_cast<const char*>(&data);
Append(StringPiece(buffer, sizeof(T)));
}
void AddAsKey(const void* data) {
auto buffer = reinterpret_cast<const char*>(&data);
Append(StringPiece(buffer, sizeof(data)));
}
string GetKey() { return key_; }
private:
string key_;
const char delimiter = 'x';
const int kMaxKeyLength = 256;
void Append(StringPiece s) {
key_.append(string(s));
key_.append(1, delimiter);
}
};
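// Cached reorder primitive: src/dst memory objects are created with dummy
// handles at setup time and rebound to real buffers via SetMemory().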
class MklReorderPrimitive : public MklPrimitive {
public:
explicit MklReorderPrimitive(const memory* from, const memory* to)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
Setup(from, to);
}
~MklReorderPrimitive() {}
std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }
void SetMemory(const memory* from, const memory* to) {
context_.src_mem->set_data_handle(from->get_data_handle());
context_.dst_mem->set_data_handle(to->get_data_handle());
}
std::shared_ptr<dnnl::stream> GetStream() { return stream_; }
private:
struct ReorderContext {
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> dst_mem;
std::shared_ptr<primitive> reorder_prim;
ReorderContext()
: src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
} context_;
std::shared_ptr<dnnl::stream> stream_;
void Setup(const memory* from, const memory* to) {
context_.src_mem.reset(
new memory(from->get_desc(), cpu_engine_, DummyData));
context_.dst_mem.reset(new memory(to->get_desc(), cpu_engine_, DummyData));
context_.reorder_prim = std::make_shared<dnnl::reorder>(
reorder(*context_.src_mem, *context_.dst_mem));
stream_.reset(new stream(cpu_engine_));
}
};
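// Caches reorder primitives keyed on the full source and destination
// descriptor contents (dims, strides, blocking structure, and data type).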
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklReorderPrimitive* Get(const memory* from, const memory* to) {
auto reorderPrim = static_cast<MklReorderPrimitive*>(
MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
if (reorderPrim == nullptr) {
reorderPrim = new MklReorderPrimitive(from, to);
MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
reorderPrim);
}
reorderPrim->SetMemory(from, to);
return reorderPrim;
}
static MklReorderPrimitiveFactory& GetInstance() {
static MklReorderPrimitiveFactory instance_;
return instance_;
}
static string CreateKey(const memory* from, const memory* to) {
string prefix = "reorder";
FactoryKeyCreator key_creator;
auto const& from_desc = from->GET_MEMORY_DESC;
auto const& to_desc = to->GET_MEMORY_DESC;
memory::dims INIT_DIMS_FROM_DESC(from_dims, from_desc);
memory::dims INIT_DIMS_FROM_DESC(to_dims, to_desc);
auto from_strides = from_desc.GET_STRIDES;
auto from_inner_nblks = from_desc.GET_INNER_NBLKS;
auto from_inner_blks = from_desc.GET_INNER_BLKS;
auto from_inner_idxs = from_desc.GET_INNER_IDXS;
auto to_inner_nblks = to_desc.GET_INNER_NBLKS;
auto to_inner_blks = to_desc.GET_INNER_BLKS;
auto to_inner_idxs = to_desc.GET_INNER_IDXS;
auto to_strides = to_desc.GET_STRIDES;
#ifndef ENABLE_ONEDNN_V3
memory::dims from_inner_blks_1(from_inner_blks,
&from_inner_blks[from_inner_nblks]);
memory::dims from_inner_idxs_1(from_inner_idxs,
&from_inner_idxs[from_inner_nblks]);
memory::dims to_inner_blks_1(to_inner_blks, &to_inner_blks[to_inner_nblks]);
memory::dims to_inner_idxs_1(to_inner_idxs, &to_inner_idxs[to_inner_nblks]);
memory::dims from_strides_outer_blocks(from_strides,
&from_strides[from_desc.ndims]);
memory::dims to_strides_outer_blocks(to_strides,
&to_strides[to_desc.ndims]);
#endif
key_creator.AddAsKey(prefix);
#ifdef DNNL_AARCH64_USE_ACL
key_creator.AddAsKey(std::this_thread::get_id());
#endif
key_creator.AddAsKey(static_cast<int>(from_desc.GET_MEMORY_DESC_FLAGS));
key_creator.AddAsKey(static_cast<int>(from_inner_nblks));
key_creator.AddAsKey(GET_INNER_DIMS(from_inner_blks, from_inner_blks_1));
key_creator.AddAsKey(GET_INNER_DIMS(from_inner_idxs, from_inner_idxs_1));
key_creator.AddAsKey(static_cast<int>(from_desc.GET_DATA_TYPE));
key_creator.AddAsKey(from_dims);
key_creator.AddAsKey(
GET_STRIDES_DIMS(from_strides, from_strides_outer_blocks));
key_creator.AddAsKey(static_cast<int>(to_desc.GET_MEMORY_DESC_FLAGS));
key_creator.AddAsKey(static_cast<int>(to_inner_nblks));
key_creator.AddAsKey(GET_INNER_DIMS(to_inner_blks, to_inner_blks_1));
key_creator.AddAsKey(GET_INNER_DIMS(to_inner_idxs, to_inner_idxs_1));
key_creator.AddAsKey(static_cast<int>(to_desc.GET_DATA_TYPE));
key_creator.AddAsKey(to_dims);
key_creator.AddAsKey(GET_STRIDES_DIMS(to_strides, to_strides_outer_blocks));
return key_creator.GetKey();
}
private:
MklReorderPrimitiveFactory() {}
~MklReorderPrimitiveFactory() {}
MklPrimitive* GetReorder(const memory* from, const memory* to) {
string key = CreateKey(from, to);
return this->GetOp(key);
}
void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
string key = CreateKey(from, to);
this->SetOp(key, op);
}
};
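// Returns a reorder primitive converting `from` to `to`, creating and
// caching one on a cache miss; the result is rebound to the given memories.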
template <typename T>
inline MklReorderPrimitive* FindOrCreateReorder(const memory* from,
const memory* to) {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
MklReorderPrimitive* reorder_prim =
MklReorderPrimitiveFactory<T>::Get(from, to);
return reorder_prim;
}
inline bool IsConv1x1StrideNot1(memory::dims filter_dims,
memory::dims strides) {
if (filter_dims.size() != 4 || strides.size() != 2) return false;
return ((filter_dims[2] == 1) && (filter_dims[3] == 1) &&
((strides[0] != 1) || (strides[1] != 1)));
}
#undef ARE_MEMORY_DESCS_EQUAL
#undef CREATE_MEMORY_DESC_USING_STRIDES
#undef GET_DATA_TYPE
#undef GET_DIMS
#undef GET_INNER_BLKS
#undef GET_INNER_DIMS
#undef GET_INNER_IDXS
#undef GET_INNER_NBLKS
#undef GET_MEMORY_DESC
#undef GET_MEMORY_DESC_FLAGS
#undef GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR
#undef GET_NDIMS
#undef GET_STRIDES
#undef GET_STRIDES_DIMS
#undef INIT_DIMS_FROM_DESC
#undef MEMORY_DESC
}
#define REGISTER_TEST_FLOAT32(TEST) REGISTER_TEST(TEST, DT_FLOAT, Float32Input);
#define REGISTER_TEST_BFLOAT16(TEST) \
REGISTER_TEST(TEST, DT_BFLOAT16, BFloat16Input);
#define REGISTER_TEST_ALL_TYPES(TEST) \
REGISTER_TEST_FLOAT32(TEST); \
REGISTER_TEST_BFLOAT16(TEST);
#else
#define REGISTER_TEST_ALL_TYPES(TEST) REGISTER_TEST_FLOAT32(TEST);
#endif
#endif | #ifdef INTEL_MKL
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
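// GetTfShape() must honor the TF data format recorded via SetTfLayout.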
TEST(MklUtilTest, MklDnnTfShape) {
auto cpu_engine = engine(engine::kind::cpu, 0);
MklDnnData<float> a(&cpu_engine);
const int N = 1, C = 2, H = 3, W = 4;
memory::dims a_dims = {N, C, H, W};
MklDnnShape a_mkldnn_shape;
a_mkldnn_shape.SetMklTensor(true);
a_mkldnn_shape.SetTfLayout(a_dims.size(), a_dims,
MklTensorFormat::FORMAT_NCHW);
TensorShape a_tf_shape_nchw({N, C, H, W});
TensorShape a_tf_shape_nhwc({N, H, W, C});
TensorShape a_mkldnn_tf_shape = a_mkldnn_shape.GetTfShape();
EXPECT_EQ(a_tf_shape_nchw, a_mkldnn_tf_shape);
EXPECT_NE(a_tf_shape_nhwc, a_mkldnn_tf_shape);
memory::dims b_dims = {N, C, H, W};
MklDnnShape b_mkldnn_shape;
b_mkldnn_shape.SetMklTensor(true);
b_mkldnn_shape.SetTfLayout(b_dims.size(), b_dims,
MklTensorFormat::FORMAT_NHWC);
TensorShape b_tf_shape_nhwc({N, H, W, C});
TensorShape b_tf_shape_nchw({N, C, H, W});
TensorShape b_mkldnn_tf_shape = b_mkldnn_shape.GetTfShape();
EXPECT_EQ(b_tf_shape_nhwc, b_mkldnn_tf_shape);
EXPECT_NE(b_tf_shape_nchw, b_mkldnn_tf_shape);
}
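// After inserting capacity + 10 items, the ten oldest entries must have been
// evicted, the rest must survive, and Clear() must empty the cache.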
TEST(MklUtilTest, LRUCacheTest) {
size_t capacity = 100;
size_t num_objects = capacity + 10;
LRUCache<int> lru_cache(capacity);
for (int k = 0; k < num_objects; k++) {
lru_cache.SetOp(std::to_string(k), new int(k));
}
for (int k = 0; k < num_objects - capacity; ++k) {
EXPECT_EQ(nullptr, lru_cache.GetOp(std::to_string(k)));
}
for (int k = num_objects - capacity; k < num_objects; ++k) {
int* int_ptr = lru_cache.GetOp(std::to_string(k));
EXPECT_NE(nullptr, int_ptr);
EXPECT_EQ(*int_ptr, k);
}
lru_cache.Clear();
for (int k = 0; k < num_objects; ++k) {
EXPECT_EQ(nullptr, lru_cache.GetOp(std::to_string(k)));
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/mkl_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/mkl_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8017d096-35ec-4ee1-9fbc-145ef10823f3 | cpp | tensorflow/tensorflow | legalize_tf | tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.cc | tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include <memory>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/variant.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/reproducer.pb.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tsl/lib/monitoring/sampler.h"
#include "xla/xla.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using tpu::FunctionToHloArgs;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
auto* phase2_bridge_compilation_time = tsl::monitoring::Sampler<1>::New(
{"/tensorflow/core/tf2xla/api/v2/phase2_compilation_time",
"The wall-clock time spent on executing graphs in milliseconds.",
"configuration"},
{tsl::monitoring::Buckets::Exponential(1, 1.5, 45)});
constexpr char kBridgeComponent[] = "TFXLABridge";
constexpr char kFullBridge[] = "full_bridge";
namespace {
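// Use the graph (non-MLIR) compiler when the input is already a
// function/graph or when the MLIR bridge rollout is explicitly disabled.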
bool ShouldFallbackToGraphCompiler(
const std::variant<MlirToHloArgs, FunctionToHloArgs>& computation) {
if (computation.index() == 1) return true;
return std::get<0>(computation).rollout_state ==
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
}
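// With VLOG(2) enabled, serializes the compile metadata, argument shapes,
// and computation into a textproto reproducer and dumps it to a file.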
void DumpComputationInput(
const tpu::TPUCompileMetadataProto& metadata,
const std::vector<tensorflow::TensorShape>& arg_shapes,
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>
computation) {
if (!VLOG_IS_ON(2)) {
return;
}
tensorflow::mlir::tf2xla::internal::LegalizeMlirToHloReproducer reproducer;
*reproducer.mutable_compile_metadata() = metadata;
for (const auto& shape : arg_shapes) {
shape.AsProto(reproducer.add_input_shapes());
}
switch (computation.index()) {
case 0:
reproducer.set_mlir_module(
std::string(std::get<0>(computation).mlir_module));
break;
case 1: {
auto input = std::get<1>(computation);
*reproducer.mutable_function_def_library() = input.flib_def->ToProto();
} break;
default:
VLOG(2) << "LegalizeMlirToHlo computation input: unknown";
break;
}
std::string string_reproducer;
tensorflow::protobuf::TextFormat::PrintToString(reproducer,
&string_reproducer);
DumpRawStringToFile("legalize_tf_reproducer.textproto", string_reproducer);
}
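// Round-trips the compiled computation through xla::HloModule and dumps all
// of its computations as text when dumping is enabled for `name`.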
Status DumpHloCompilationResult(std::string_view name,
XlaCompilationResult* compilation_result) {
if (!VLOG_IS_ON(2) &&
!DEBUG_DATA_DUMPER()->ShouldDump(std::string(name), kDebugGroupMain)) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
auto hlo_module_config,
xla::HloModule::CreateModuleConfigFromProto(
compilation_result->computation->proto(), xla::DebugOptions()));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::HloModule> hlo_module,
xla::HloModule::CreateFromProto(compilation_result->computation->proto(),
hlo_module_config));
std::string all_computations;
for (auto computation : hlo_module->computations()) {
all_computations += computation->ToString() + "\n\n";
}
tensorflow::DumpRawStringToFile(name, all_computations);
return absl::OkStatus();
}
}
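// Compiles a TF computation (MLIR module or function/graph) to XLA HLO,
// recording wall-clock time in the phase-2 streamz sampler. Falls back to
// the graph compiler when required; otherwise runs the combined MLIR +
// XlaBuilder bridge.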
absl::StatusOr<tensorflow::XlaCompilationResult> LegalizeMlirToHlo(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
std::vector<std::unique_ptr<::mlir::Pass>>& custom_legalization_passes,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client) {
CompilationTimer timer;
auto record_time = llvm::make_scope_exit([&timer] {
phase2_bridge_compilation_time->GetCell(kFullBridge)
->Add(timer.ElapsedCyclesInMilliseconds());
});
auto compilation_result = std::make_unique<XlaCompilationResult>();
DumpComputationInput(metadata, arg_shapes, computation);
if (ShouldFallbackToGraphCompiler(computation)) {
TF_RETURN_IF_ERROR(tf2xla::v1::CompileTensorflowGraphToHlo(
computation, metadata, use_tuple_args, shape_determination_fns,
arg_shapes, arg_core_mapping, per_core_arg_shapes, client,
compilation_result.get()));
DumpHloCompilationResult("legalize_tf_fallback.hlo",
compilation_result.get())
.IgnoreError();
return *compilation_result;
}
auto combined_bridge_status = internal::LegalizeTfToHlo(
std::get<0>(computation), metadata, use_tuple_args, device_type,
shape_determination_fns, arg_shapes, arg_core_mapping,
per_core_arg_shapes, custom_legalization_passes, client,
compilation_result.get());
if (combined_bridge_status.ok()) {
VLOG(1) << "Successfully compiled MLIR computation to XLA HLO using "
"Combined MLIR and XlaBuilder Bridge.";
DumpHloCompilationResult("legalize_tf_combined_bridge.hlo",
compilation_result.get())
.IgnoreError();
return *compilation_result;
}
return combined_bridge_status.status();
}
}  // namespace v2
}  // namespace tf2xla
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/monitoring/test_utils.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::Not;
using ::testing::TestWithParam;
using tpu::FunctionToHloArgs;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
static constexpr char kCompilationTimeStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_time";
static constexpr char kFullBridge[] = "full_bridge";
static constexpr char kCompilationStatusStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_status";
static const char kMlirWithFallbackModeSuccess[] =
"kMlirWithFallbackModeSuccess";
static const char kMlirWithFallbackModeFailure[] =
"kMlirWithFallbackModeFailure";
static const char kOldBridgeMlirFilteredFailure[] =
"kOldBridgeMlirFilteredFailure";
static const char kOldBridgeWithFallbackModeFailure[] =
"kOldBridgeWithFallbackModeFailure";
static const char kOldBridgeMlirFilteredSuccess[] =
"kOldBridgeMlirFilteredSuccess";
static const char kOldBridgeWithFallbackModeSuccess[] =
"kOldBridgeWithFallbackModeSuccess";
static const char kMlirCombinedMlirSuccess[] = "kMlirCombinedMlirSuccess";
static const char kMlirCombinedMlirFailure[] = "kMlirCombinedMlirFailure";
static const char kMlirCombinedOldSuccess[] = "kMlirCombinedOldSuccess";
static const char kMlirCombinedOldFailure[] = "kMlirCombinedOldFailure";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
func.return
}
})";
static constexpr char kBadMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = tf.Unknown() -> ()
func.return %0
}
})";
static constexpr char kUnsupportedMlirBridgeModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%cst0 = "tf.Const"(){ value = dense<0> : tensor<3x5xi1>} : () -> tensor<3x5xi1>
%0 = "tf.Where"(%cst0) : (tensor<3x5xi1>) -> tensor<?x2xi64>
func.return
}
})";
absl::StatusOr<XlaCompiler::CompilationResult> CompileMlirModule(
const char* mlir_module_str,
ConfigProto::Experimental::MlirBridgeRollout rollout_state) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state = rollout_state;
mlir_to_hlo_args.mlir_module = mlir_module_str;
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
tensorflow::tf2xla::internal::ConfigureMetadata(mlir_module_str, arg_shapes,
metadata_proto)
.IgnoreError();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
return LegalizeMlirToHlo(mlir_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_TPU_JIT",
custom_legalization_passes,
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes, client);
}
TEST(LegalizeTFTest, RecordsStreamzForSuccessfulLegalizeWithMlirBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
EXPECT_EQ(compilation_status.Delta(kMlirWithFallbackModeFailure), 0);
}
TEST(LegalizeTFTest, MatMul) {
static constexpr char kMatMulModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<5x11xf32>) {
%arg0 = "tf.Const"() {value = dense<-3.0> : tensor<5x7xf32>} : () -> tensor<5x7xf32>
%arg1 = "tf.Const"() {value = dense<-3.0> : tensor<11x7xf32>} : () -> tensor<11x7xf32>
%1 = "tf.MatMul"(%arg0, %arg1) {transpose_a = false, transpose_b = true} : (tensor<5x7xf32>, tensor<11x7xf32>) -> tensor<5x11xf32>
func.return %1 : tensor<5x11xf32>
}
})";
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMatMulModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
}
struct MatMulTestCase {
std::string mat_mul_method;
};
using BatchMatMulTest = TestWithParam<MatMulTestCase>;
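// The module string below is an absl::StrFormat template: "%%" escapes a
// literal '%' in MLIR SSA names, and "%s" is replaced with the op name of
// the BatchMatMul variant under test.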
TEST_P(BatchMatMulTest, BatchMatMul) {
const MatMulTestCase& test_case = GetParam();
static constexpr char kMatMulModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<1x4x4xf32>) {
%%arg0 = "tf.Const"() {value = dense<-3.0> : tensor<1x4x2xf32>} : () -> tensor<1x4x2xf32>
%%arg1 = "tf.Const"() {value = dense<-3.0> : tensor<1x2x4xf32>} : () -> tensor<1x2x4xf32>
%%1 = "tf.%s"(%%arg0, %%arg1) {T = f32, adj_x = false, adj_y = false, grad_x = false, grad_y = false, device = ""} : (tensor<1x4x2xf32>, tensor<1x2x4xf32>) -> tensor<1x4x4xf32>
func.return %%1 : tensor<1x4x4xf32>
}
})";
std::string mat_mul_method =
absl::StrFormat(kMatMulModuleStr, test_case.mat_mul_method);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
mat_mul_method.c_str(),
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
}
INSTANTIATE_TEST_SUITE_P(
BatchMatMulTest, BatchMatMulTest,
::testing::ValuesIn<MatMulTestCase>({
{"BatchMatMul"},
{"BatchMatMulV2"},
{"BatchMatMulV3"},
}),
[](const ::testing::TestParamInfo<BatchMatMulTest::ParamType>& info) {
return info.param.mat_mul_method;
});
TEST(LegalizeTFTest, DumpsProducedHLO) {
Env* env = Env::Default();
std::string test_dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::vector<std::string> files;
TF_ASSERT_OK(env->GetChildren(test_dir, &files));
int original_files_size = files.size();
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED));
TF_ASSERT_OK(env->GetChildren(test_dir, &files));
EXPECT_THAT(files.size(), ::testing::Gt(original_files_size));
setenv("TF_DUMP_GRAPH_PREFIX", test_dir.c_str(), 0);
}
TEST(LegalizeTFTest, RecordsStreamzForFailedLegalizeWithMlirBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
auto result = CompileMlirModule(
kBadMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_FALSE(result.ok());
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirFailure), 1);
}
TEST(LegalizeTFTest, RecordsStreamzForSuccessWithCombinedBridge) {
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
auto result = CompileMlirModule(
kUnsupportedMlirBridgeModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_TRUE(result.ok());
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedMlirFailure), 0);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedOldSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kMlirCombinedOldFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeMlirFilteredFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeWithFallbackModeFailure), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeMlirFilteredSuccess), 0);
EXPECT_EQ(compilation_status.Delta(kOldBridgeWithFallbackModeSuccess), 0);
}
TEST(LegalizeTFTest, RecordsStreamzForNoMlirFallback) {
FunctionDef my_func =
tensorflow::FunctionDefHelper::Create("empty", {}, {}, {}, {}, {});
tensorflow::FunctionDefLibrary fdef;
*(fdef.add_function()) = my_func;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), fdef);
OpInputList guaranteed_constants;
NameAttrList function;
FunctionToHloArgs function_to_hlo_args{&function,
&flib_def,
0,
{&guaranteed_constants}};
se::Platform* cpu_platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(cpu_platform).value();
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
absl::StatusOr<XlaCompiler::CompilationResult> compile_result =
LegalizeMlirToHlo(function_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_CPU_JIT",
custom_legalization_passes,
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes, client);
EXPECT_FALSE(compile_result.ok());
}
TEST(LegalizeTFTest, RecordsCompilationTimeForSuccessfulCompilation) {
CellReader<monitoring::testing::Histogram> compilation_time(
kCompilationTimeStreamzName);
TF_ASSERT_OK_AND_ASSIGN(
XlaCompiler::CompilationResult result,
CompileMlirModule(
kMlirModuleStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_ENABLED));
EXPECT_GT(compilation_time.Delta(kFullBridge).num(), 0);
}
TEST(LegalizeTFTest, SuccessfullyCompilesModulesWithReturnValues) {
static constexpr char kHasReturnValuesAndNoMetadataRetvals[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<2xi32>) {
%cst = "tf.Const"() {value = dense<[524170, 523952]> : tensor<2xi32>} : () -> tensor<2xi32>
return %cst : tensor<2xi32>
}
})";
auto compilation_result = CompileMlirModule(
kHasReturnValuesAndNoMetadataRetvals,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
EXPECT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("opcode:.*constant"));
}
TEST(LegalizeTFTest, SkipsTensorListSetItemIfDimensionsTooLarge) {
static constexpr char kTensorListSetItemDimensionTooLarge[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<!tf_type.variant<tensor<64x1xbf16>>> {
%elem_shape = "tf.Const"() <{value = dense<-1> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%num_elements = "tf.Const"() <{value = dense<0> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%list = "tf.TensorListReserve"(%elem_shape, %num_elements) : (tensor<i32>, tensor<i32>) -> tensor<!tf_type.variant<tensor<64x1xbf16>>>
%index = "tf.Const"() <{value = dense<0> : tensor<i32>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<i32>
%element = "tf.Const"() <{value = dense<0.0> : tensor<64x1xbf16>}> {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : () -> tensor<64x1xbf16>
%updated_list = "tf.TensorListSetItem"(%list, %index, %element) : (tensor<!tf_type.variant<tensor<64x1xbf16>>>, tensor<i32>, tensor<64x1xbf16>) -> tensor<!tf_type.variant<tensor<64x1xbf16>>>
return %updated_list : tensor<!tf_type.variant<tensor<64x1xbf16>>>
}
})";
auto compilation_result = CompileMlirModule(
kTensorListSetItemDimensionTooLarge,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
ASSERT_TRUE(compilation_result.ok());
ASSERT_THAT(compilation_result,
Not(ComputationProtoContains("%.*= \"tf.TensorListSetItem")));
ASSERT_THAT(compilation_result,
Not(ComputationProtoContains("%.*=.*DynamicUpdateSlice")));
}
TEST(LegalizeTFTest, LegalizesFunctionWithBoundedDynamicArg) {
static constexpr char kMlirModuleWithBoundedDynamicArgStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [3]>> ) -> (tensor<?xi32, #mhlo.type_extensions<bounds = [3]>>) {
func.return %arg0 : tensor<?xi32, #mhlo.type_extensions<bounds = [3]>>
}
})";
auto compilation_result = CompileMlirModule(
kMlirModuleWithBoundedDynamicArgStr,
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
ASSERT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("element_type:.S32\n.*dimensions: 3"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34fb00dd-c8fd-49ca-aa3d-7b157bea692d | cpp | tensorflow/tensorflow | tfrt_session | tensorflow/core/tfrt/tfrt_session/tfrt_session.cc | tensorflow/core/tfrt/tfrt_session/tfrt_session_test.cc | #include "tensorflow/core/tfrt/tfrt_session/tfrt_session.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/die_if_null.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "Eigen/ThreadPool"
#include "llvm/ADT/STLExtras.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/local_session_selection.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/threadpool_options.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/batch_kernel.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace {
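// Adapts an Eigen::ThreadPoolInterface to the tensorflow::thread
// ThreadPoolInterface expected by the work queues below.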
class ThreadPoolInterfaceWrapper : public thread::ThreadPoolInterface {
public:
explicit ThreadPoolInterfaceWrapper(Eigen::ThreadPoolInterface* thread_pool)
: thread_pool_{thread_pool} {
DCHECK(thread_pool);
}
void Schedule(std::function<void()> fn) override {
return thread_pool().Schedule(std::move(fn));
}
void ScheduleWithHint(std::function<void()> fn, int start, int end) override {
return thread_pool().ScheduleWithHint(std::move(fn), start, end);
}
void Cancel() override { thread_pool().Cancel(); }
int NumThreads() const override { return thread_pool().NumThreads(); }
int CurrentThreadId() const override {
return thread_pool().CurrentThreadId();
}
private:
Eigen::ThreadPoolInterface& thread_pool() const {
DCHECK(thread_pool_);
return *thread_pool_;
}
Eigen::ThreadPoolInterface* thread_pool_ = nullptr;
};
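// Holds the inter-op thread pools configured for a session, plus whether ops
// should run in the caller thread instead of a pool.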
class TfrtSessionInterOpThreadPools {
public:
TfrtSessionInterOpThreadPools(int size, bool run_in_caller_thread)
: thread_pools_(size), run_in_caller_thread_(run_in_caller_thread) {}
void SetThreadPool(int index, ThreadPoolInterfaceWrapper* thread_pool) {
thread_pools_.at(index) = thread_pool;
}
absl::StatusOr<ThreadPoolInterfaceWrapper*> GetThreadPool(int index) {
if (index < 0 || index >= thread_pools_.size())
return errors::InvalidArgument("Invalid thread pool index ", index);
return thread_pools_[index];
}
bool run_in_caller_thread() const { return run_in_caller_thread_; }
private:
std::vector<ThreadPoolInterfaceWrapper*> thread_pools_;
bool run_in_caller_thread_;
};
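// A tensorflow::Session implementation backed by the TFRT GraphExecutor.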
class TfrtSession : public tensorflow::Session {
public:
explicit TfrtSession(const SessionOptions& options,
tensorflow::tfrt_stub::Runtime* runtime,
TfrtDeviceInfraTarget device_target,
bool tpu_use_tpu_runner, bool use_gpu,
TfrtSessionInterOpThreadPools inter_op_thread_pools,
bool enable_mlrt,
tensorflow::BackendCompiler* backend_compiler,
std::unique_ptr<StaticDeviceMgr> device_manager)
: runtime_{runtime},
device_target_{device_target},
tpu_use_tpu_runner_{tpu_use_tpu_runner},
use_gpu_{use_gpu},
inter_op_thread_pools_{std::move(inter_op_thread_pools)},
enable_mlrt_(enable_mlrt),
options_{options},
backend_compiler_(backend_compiler),
device_manager_(std::move(device_manager)) {}
Status Create(const GraphDef& graph) override {
return Create(GraphDef(graph));
}
Status Create(GraphDef&& graph) override {
absl::MutexLock lock(&session_state_lock_);
return CreateLocked(std::move(graph));
}
Status CreateLocked(GraphDef graph)
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (graph.node_size() == 0) {
LOG(ERROR) << "Ignoring empty graph.";
return absl::OkStatus();
}
if (session_state_ == SessionState::kCreated) {
return errors::AlreadyExists(
"A Graph has already been created for this session.");
}
TF_RETURN_IF_ERROR(CheckNotClosedLocked());
auto options = GetGraphExecutionOptions();
tensorflow::tfrt_stub::UpdateTpuTargetByBridgeCompatibility(options, graph);
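    // Drop the ConfigureDistributedTPU node from the graph before
    // compilation.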
auto* nodes = graph.mutable_node();
for (auto it = nodes->begin(), end = nodes->end(); it != end; ++it) {
if (it->name() == "ConfigureDistributedTPU") {
nodes->erase(it);
break;
}
}
auto session_options =
tensorflow::tfrt_stub::CreateDefaultSessionOptions(options);
session_options.config.mutable_experimental()
->set_optimize_for_static_graph(
options_.config.experimental().optimize_for_static_graph());
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(
options_.config.experimental().disable_optimize_for_static_graph());
LOG_FIRST_N(INFO, 10) << "SessionOptions: "
<< session_options.config.DebugString();
const auto& fdef_lib = graph.library();
TF_ASSIGN_OR_RETURN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::CreateWithDeviceMgr(
session_options, fdef_lib, device_manager_.get()));
auto kernel_registry = std::make_unique<mlrt::KernelRegistry>();
tensorflow::tf_mlrt::RegisterTfMlrtKernels(*kernel_registry);
tensorflow::tf_mlrt::RegisterTfMlrtBatchKernels(*kernel_registry);
auto resource_context = std::make_unique<tfrt::ResourceContext>();
tfrt_stub::ModelRuntimeContext model_context(
&options, "unknown_export_dir", resource_context.get());
model_context.set_graph_def(&graph);
model_context.set_device_mgr(&fallback_state->device_manager());
model_context.set_is_local_session(
!options_.config.experimental().enable_multi_host() &&
!options_.config.experimental().tfrt_use_ifrt());
TF_RETURN_IF_ERROR(options.runtime->CreateRuntimeResources(model_context));
GraphOptimizationPassOptions optimization_options;
optimization_options.session_options = &options_;
FunctionLibraryDefinition flib_def = fallback_state->func_lib_def();
optimization_options.flib_def = &flib_def;
std::unordered_map<string, std::unique_ptr<Graph>> partition_graphs;
auto initial_graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
tensorflow::GraphConstructorOptions opts;
opts.allow_internal_ops = true;
TF_RETURN_IF_ERROR(
tensorflow::ConvertGraphDefToGraph(opts, graph, initial_graph.get()));
partition_graphs["graph"] = std::move(initial_graph);
optimization_options.partition_graphs = &partition_graphs;
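    // Run the POST_PARTITIONING optimization passes over the graph.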
OptimizationPassRegistry::Global()->LogAllGroupings(1);
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_PARTITIONING, optimization_options));
LOG_FIRST_N(INFO, 10) << "GraphExecutionOptions: " << options;
TF_ASSIGN_OR_RETURN(
graph_executor_,
tensorflow::tfrt_stub::GraphExecutor::Create(
options, std::move(fallback_state), std::move(resource_context),
std::move(graph), std::move(kernel_registry)));
session_state_ = SessionState::kCreated;
return absl::OkStatus();
}
Status Extend(const GraphDef& graph) override {
return Extend(GraphDef(graph));
}
Status Extend(GraphDef&& graph) override {
absl::MutexLock lock(&session_state_lock_);
return ExtendLocked(std::move(graph));
}
Status ExtendLocked(GraphDef graph)
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (session_state_ == SessionState::kCreated) {
return graph_executor_->Extend(graph);
}
return CreateLocked(std::move(graph));
}
Status RunInternal(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs,
const thread::ThreadPoolOptions& thread_pool_options) {
{
absl::MutexLock lock(&session_state_lock_);
if (session_state_ == SessionState::kInitialized) {
return errors::Unavailable("Session not created yet.");
}
TF_RETURN_IF_ERROR(CheckNotClosedLocked());
}
DCHECK(outputs || output_tensor_names.empty()) << "No outputs in Run()";
tensorflow::tfrt_stub::GraphExecutionRunOptions
graph_execution_run_options{};
if (run_options.timeout_in_ms() > 0) {
graph_execution_run_options.deadline = absl::ToChronoTime(
absl::Now() + absl::Milliseconds(run_options.timeout_in_ms()));
}
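    // Pick the inter-op work queue: run in the caller thread when configured
    // to do so, otherwise use the pool supplied via thread_pool_options or
    // the session pool selected by run_options.inter_op_thread_pool().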
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface> work_queue;
auto* const intra_op_thread_pool = thread_pool_options.intra_op_threadpool;
if (inter_op_thread_pools_.run_in_caller_thread() ||
run_options.inter_op_thread_pool() == -1) {
work_queue = tfrt_stub::WrapDefaultWorkQueue(
tfrt::CreateSingleThreadedWorkQueue(), intra_op_thread_pool);
} else if (thread_pool_options.inter_op_threadpool != nullptr) {
work_queue =
std::make_unique<tensorflow::tfrt_stub::TfThreadPoolWorkQueue>(
tfrt::GetUniqueInt(), intra_op_thread_pool,
thread_pool_options.inter_op_threadpool);
} else {
TF_ASSIGN_OR_RETURN(auto* thread_pool,
inter_op_thread_pools_.GetThreadPool(
run_options.inter_op_thread_pool()));
work_queue =
std::make_unique<tensorflow::tfrt_stub::TfThreadPoolWorkQueue>(
tfrt::GetUniqueInt(), intra_op_thread_pool, thread_pool);
}
graph_execution_run_options.work_queue = work_queue.get();
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(graph_executor_->Run(
graph_execution_run_options, inputs, output_tensor_names,
target_node_names, &output_tensors));
if (outputs) {
DCHECK_EQ(output_tensors.size(), output_tensor_names.size());
outputs->swap(output_tensors);
} else {
DCHECK(output_tensor_names.empty()) << "No outputs in Run()";
}
return absl::OkStatus();
}
Status Run(const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs) override {
return RunInternal(RunOptions{}, inputs, output_tensor_names,
target_node_names, outputs, {});
}
Status Run(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata) override {
return Run(run_options, inputs, output_tensor_names, target_node_names,
outputs, run_metadata, {});
}
Status Run(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_tensor_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& thread_pool_options) override {
return RunInternal(run_options, inputs, output_tensor_names,
target_tensor_names, outputs, thread_pool_options);
}
Status MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) override {
absl::MutexLock lock(&callables_lock_);
*out_handle = next_callable_handle_++;
assert(callables_.find(*out_handle) == callables_.end());
callables_[*out_handle] = {callable_options};
return absl::OkStatus();
}
Status RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) override {
return RunCallable(handle, feed_tensors, fetch_tensors, run_metadata, {});
}
Status RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& thread_pool_options) override {
Callable callable;
{
absl::MutexLock lock(&callables_lock_);
auto it = callables_.find(handle);
if (it == callables_.end())
return errors::InvalidArgument("No such callable handle: ", handle);
callable = it->second;
}
if (callable.callable_options.feed_size() != feed_tensors.size())
return errors::InvalidArgument("Invalid number of feed tensors");
std::vector<std::pair<std::string, Tensor>> inputs;
for (const auto& it :
llvm::zip(callable.callable_options.feed(), feed_tensors)) {
inputs.emplace_back(std::make_pair(std::get<0>(it), std::get<1>(it)));
}
std::vector<std::string> output_tensor_names;
for (const auto& tensor_name : callable.callable_options.fetch()) {
output_tensor_names.emplace_back(tensor_name);
}
std::vector<std::string> target_node_names;
for (const auto& node_name : callable.callable_options.target()) {
target_node_names.emplace_back(node_name);
}
return Run(inputs, output_tensor_names, target_node_names, fetch_tensors);
}
Status ReleaseCallable(CallableHandle handle) override {
absl::MutexLock lock(&callables_lock_);
auto it = callables_.find(handle);
if (it == callables_.end())
return errors::InvalidArgument("No such callable handle: ", handle);
callables_.erase(it);
return absl::OkStatus();
}
Status Close() override {
absl::MutexLock lock(&session_state_lock_);
session_state_ = SessionState::kClosed;
return absl::OkStatus();
}
Status ListDevices(std::vector<DeviceAttributes>* response) override {
return errors::Unimplemented("TfrtSession::ListDevices is Unimplemented.");
}
Status LocalDeviceManager(const DeviceMgr** output) override {
*output = device_manager_.get();
return absl::OkStatus();
}
Status Finalize() override { return absl::OkStatus(); }
private:
tfrt::HostContext* GetHostContext() {
return runtime_->core_runtime()->GetHostContext();
}
tensorflow::tfrt_stub::GraphExecutionOptions GetGraphExecutionOptions()
const {
::tensorflow::tfrt_stub::GraphExecutionOptions options(runtime_);
auto& compile_options = options.compile_options;
compile_options.variable_device =
DeviceNameUtils::FullName("localhost", 0,
0, "CPU", 0);
compile_options.enable_grappler = true;
compile_options.device_target = device_target_;
compile_options.tpu_fuse_ops = tpu_use_tpu_runner_;
compile_options.hoist_invariant_ops = true;
compile_options.sink_in_invariant_ops = false;
compile_options.cost_threshold = 1024;
if (use_gpu_) {
options.enable_tfrt_gpu = true;
options.enable_grappler_function_optimizer = true;
}
compile_options.use_tpu_host_allocator_for_inputs = tpu_use_tpu_runner_;
options.compile_options.backend_compiler = backend_compiler_;
options.model_metadata = options_.config.experimental().session_metadata();
options.enable_mlrt = enable_mlrt_;
return options;
}
Status CheckNotClosedLocked() const
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (session_state_ == SessionState::kClosed) {
return errors::Cancelled("Session has been closed.");
}
return absl::OkStatus();
}
struct Callable {
CallableOptions callable_options;
};
enum class SessionState {
kInitialized,
kCreated,
kClosed,
};
mutable absl::Mutex session_state_lock_;
SessionState session_state_ TF_GUARDED_BY(session_state_lock_) =
SessionState::kInitialized;
std::unique_ptr<::tensorflow::tfrt_stub::GraphExecutor> graph_executor_;
tensorflow::tfrt_stub::Runtime* runtime_ = nullptr;
const TfrtDeviceInfraTarget device_target_;
const bool tpu_use_tpu_runner_;
const bool use_gpu_;
TfrtSessionInterOpThreadPools inter_op_thread_pools_;
mutable absl::Mutex callables_lock_;
CallableHandle next_callable_handle_ TF_GUARDED_BY(callables_lock_) = 0;
absl::flat_hash_map<CallableHandle, Callable> callables_
TF_GUARDED_BY(callables_lock_);
bool enable_mlrt_ = false;
SessionOptions options_ = SessionOptions();
tensorflow::BackendCompiler* backend_compiler_ = nullptr;
std::unique_ptr<StaticDeviceMgr> device_manager_;
};
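// Builds a RunHandler-based work queue from the session threadpool options,
// splitting the main threads across the configured sub thread pools.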
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface>
CreateRunHandlerWorkQueue(const TfrtThreadpoolOptions& session_options) {
int num_complementary_threads =
std::max(1, session_options.num_main_threads / 2);
tfrt::tf::RunHandlerThreadWorkQueue::Options options;
options.num_main_threads =
session_options.num_main_threads;
options.num_complementary_threads = num_complementary_threads;
options.init_timeout_ms =
absl::ToInt64Milliseconds(session_options.init_timeout);
options.max_concurrent_handler =
session_options.max_concurrent_handler;
options.num_sub_thread_pool =
session_options.num_sub_thread_pool;
std::vector<int> num_threads;
const int num_threads_per_pool =
options.num_main_threads / options.num_sub_thread_pool;
num_threads.resize(options.num_sub_thread_pool - 1, num_threads_per_pool);
num_threads.push_back(options.num_main_threads -
(options.num_sub_thread_pool - 1) *
num_threads_per_pool);
options.num_threads_in_sub_thread_pool = num_threads;
options.sub_thread_request_percentage = {1.0};
options.use_adaptive_waiting_time = true;
LOG_FIRST_N(INFO, 10) << "RunHandlerThreadWorkQueue Options: " << options;
return std::make_unique<tfrt::tf::RunHandlerThreadWorkQueue>(options);
}
}
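// Manages the global and named inter-op thread pools shared by TfrtSessions.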
class TfrtSessionFactory::ThreadPoolManager {
public:
absl::StatusOr<TfrtSessionInterOpThreadPools> UpdateAndGetInterOpThreadPools(
const SessionOptions& options) {
if (options.config.inter_op_parallelism_threads() > 0) {
LOG(WARNING) << "TFRT session does not support positive "
"inter_op_parallelism_threads for now";
}
if (options.config.use_per_session_threads()) {
return errors::InvalidArgument(
"TFRT session does not yet support use_per_session_threads()");
}
auto session_inter_op_thread_pool_size =
options.config.session_inter_op_thread_pool_size();
if (session_inter_op_thread_pool_size > 0) {
TfrtSessionInterOpThreadPools inter_op_thread_pools{
session_inter_op_thread_pool_size, false};
for (const auto& it :
llvm::enumerate(options.config.session_inter_op_thread_pool())) {
const ThreadPoolOptionProto& pool_options = it.value();
auto pool_index = it.index();
auto num_threads = pool_options.num_threads();
if (num_threads != 0) {
TF_ASSIGN_OR_RETURN(
auto* thread_pool,
GetOrCreateThreadPool(options.env, pool_options, pool_index));
inter_op_thread_pools.SetThreadPool(pool_index, thread_pool);
} else {
inter_op_thread_pools.SetThreadPool(pool_index,
GlobalThreadPool(options));
}
}
return inter_op_thread_pools;
} else if (options.config.inter_op_parallelism_threads() < 0) {
return TfrtSessionInterOpThreadPools{0,
true};
} else if (session_inter_op_thread_pool_size == 0) {
TfrtSessionInterOpThreadPools session_thread_pool_options{
1, false};
session_thread_pool_options.SetThreadPool(0, GlobalThreadPool(options));
return session_thread_pool_options;
} else {
return errors::InvalidArgument(
"session_inter_op_thread_pool_size must be >= 0");
}
}
private:
class ThreadPoolWithNumThreads {
public:
ThreadPoolWithNumThreads(int num_thread,
std::unique_ptr<thread::ThreadPool> thread_pool)
: num_threads_(num_thread),
thread_pool_(std::move(thread_pool)),
thread_pool_interface_wrapper_(
ABSL_DIE_IF_NULL(thread_pool_)->AsEigenThreadPool()) {}
int num_threads() const { return num_threads_; }
ThreadPoolInterfaceWrapper* thread_pool_interface_wrapper() {
return &thread_pool_interface_wrapper_;
}
private:
int num_threads_;
std::unique_ptr<thread::ThreadPool> thread_pool_;
ThreadPoolInterfaceWrapper thread_pool_interface_wrapper_;
};
ThreadPoolInterfaceWrapper* GlobalThreadPool(const SessionOptions& options) {
static thread::ThreadPool* const thread_pool =
NewThreadPoolFromSessionOptions(options);
static auto* const wrapper =
new ThreadPoolInterfaceWrapper{thread_pool->AsEigenThreadPool()};
return wrapper;
}
absl::StatusOr<ThreadPoolInterfaceWrapper*> GetOrCreateThreadPool(
Env* env, const ThreadPoolOptionProto& pool_options, int pool_index) {
const int32_t num_threads = pool_options.num_threads();
CHECK_GT(num_threads, 0);
const std::string& name = pool_options.global_name();
if (name.empty()) {
return errors::InvalidArgument(
"TFRT session does not yet support session local thread pool");
}
absl::MutexLock lock(&mutex_);
auto it = named_thread_pools_.find(name);
if (it != named_thread_pools_.end()) {
if (it->second->num_threads() != num_threads) {
return errors::InvalidArgument(
"TfrtSession thread pool ", name,
" configured previously with num_threads=",
it->second->num_threads(),
"; cannot re-configure with num_threads=", num_threads);
}
return it->second->thread_pool_interface_wrapper();
}
auto thread_pool = std::make_unique<thread::ThreadPool>(
env, ThreadOptions(), absl::StrCat("TfrtSessionInter", pool_index),
num_threads, false,
nullptr);
auto ret = named_thread_pools_.emplace(
name, std::make_unique<ThreadPoolWithNumThreads>(
num_threads, std::move(thread_pool)));
CHECK(ret.second);
return ret.first->second->thread_pool_interface_wrapper();
}
mutable absl::Mutex mutex_;
absl::flat_hash_map<std::string, std::unique_ptr<ThreadPoolWithNumThreads>>
named_thread_pools_ ABSL_GUARDED_BY(mutex_);
};
TfrtSessionFactory::TfrtSessionFactory()
: thread_pool_manager_(std::make_unique<ThreadPoolManager>()) {}
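// Holds at most one runtime initializer, run when the factory creates its
// first session.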
class InitializerRegistry {
public:
static InitializerRegistry& Get() {
static auto* reg = new InitializerRegistry();
return *reg;
}
void Register(TfrtSessionFactory::RuntimeInitializer initializer) {
DCHECK(initializer_ == nullptr);
initializer_ = initializer;
}
absl::Status RunInitializer(tfrt_stub::Runtime* runtime) {
LOG(INFO) << "Running Initializer within TfrtSessionFactory.";
TF_RETURN_IF_ERROR(initializer_ ? initializer_(runtime) : absl::OkStatus());
return absl::OkStatus();
}
private:
TfrtSessionFactory::RuntimeInitializer initializer_;
};
void TfrtSessionFactory::RegisterInitializer(RuntimeInitializer initializer) {
InitializerRegistry::Get().Register(std::move(initializer));
}
Status TfrtSessionFactory::InitializeLocked(const TfrtSessionOptions& options) {
mutex_.AssertHeld();
if (options.use_tpu) {
DCHECK(!options.backend_compiler);
DCHECK(!options.use_gpu);
device_target_ = TfrtDeviceInfraTarget::kTpurt;
tpu_use_tpu_runner_ = true;
} else if (options.use_gpu) {
DCHECK(!options.backend_compiler);
device_target_ = TfrtDeviceInfraTarget::kGpu;
use_gpu_ = true;
} else if (options.backend_compiler) {
backend_compiler_ = options.backend_compiler;
}
LOG(INFO) << "Start initializing TfrtSession";
if (options.runtime != nullptr) {
runtime_ = options.runtime;
} else if (runtime_ == nullptr) {
owned_runtime_ = tensorflow::tfrt_stub::Runtime::Create(
CreateRunHandlerWorkQueue(options.threadpool_options));
runtime_ = owned_runtime_.get();
}
enable_mlrt_ = options.enable_mlrt;
return absl::OkStatus();
}
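// The factory handles sessions targeting "tfrt_session", or empty targets
// when TFRT is enabled via the config or as the default local session.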
bool TfrtSessionFactory::AcceptsOptions(const SessionOptions& options) {
if (options.target == "tfrt_session") return true;
if (options.target.empty()) {
return options.config.experimental().use_tfrt() ||
GetDefaultLocalSessionImpl() == LocalSessionImpl::kTfrtSession;
}
return false;
}
Status TfrtSessionFactory::NewSession(const SessionOptions& options,
Session** out_session)
TF_LOCKS_EXCLUDED(mutex_) {
if (options.config.intra_op_parallelism_threads() != 0) {
LOG(WARNING) << "TFRT session ignores intra_op_parallelism_threads. "
"Intra-op thread "
"pool can only be configured by `Run()`";
}
*out_session = nullptr;
absl::MutexLock lock(&mutex_);
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
device_manager_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
if (!IsInitialized()) {
TF_RETURN_IF_ERROR(InitializeLocked({}));
TF_RETURN_IF_ERROR(InitializerRegistry::Get().RunInitializer(runtime_));
}
TF_ASSIGN_OR_RETURN(
auto inter_op_thread_pools,
thread_pool_manager_->UpdateAndGetInterOpThreadPools(options));
auto* backend_compiler = (options.config.experimental().enable_multi_host() ||
options.config.experimental().tfrt_use_ifrt())
? backend_compiler_
: nullptr;
*out_session =
new TfrtSession(options, runtime_, device_target_, tpu_use_tpu_runner_,
use_gpu_, std::move(inter_op_thread_pools), enable_mlrt_,
backend_compiler, std::move(device_manager_));
return absl::OkStatus();
}
namespace {
static TfrtSessionFactory* session_factory = nullptr;
}
tfrt_stub::Runtime* TfrtSessionFactory::GetRuntime() {
DCHECK(session_factory != nullptr);
absl::MutexLock lock(&session_factory->mutex_);
return session_factory->runtime_;
}
Status InitializeTfrtSession(const TfrtSessionOptions& options) {
DCHECK(session_factory != nullptr);
absl::MutexLock lock(&session_factory->mutex_);
DCHECK(!session_factory->IsInitialized());
return UpdateTfrtSessionOptionsLocked(options);
}
Status UpdateTfrtSessionOptionsLocked(const TfrtSessionOptions& options) {
DCHECK(session_factory != nullptr);
session_factory->mutex_.AssertHeld();
return session_factory->InitializeLocked(options);
}
static const bool kFactoryRegistration = [] {
session_factory = new TfrtSessionFactory();
LOG(INFO) << "Registering TfrtSession";
SessionFactory::Register("tfrt_session", session_factory);
return true;
}();
} | #include "tensorflow/core/tfrt/tfrt_session/tfrt_session.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/time/time.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool_options.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
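// Global test environment that initializes the TfrtSession factory once for
// all tests in this file.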
class TfrtSessionEnvironment : public ::testing::Environment {
public:
void SetUp() override {
TfrtSessionOptions options{
.threadpool_options = tensorflow::TfrtThreadpoolOptions{
.num_main_threads = tensorflow::port::MaxParallelism(),
.init_timeout = absl::Milliseconds(100),
.max_concurrent_handler = 128,
.num_sub_thread_pool = 1}};
TF_ASSERT_OK(InitializeTfrtSession(options));
}
};
class TfrtSessionTest : public ::testing::Test {
protected:
void SetUp() override {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
auto* model_metadata =
options.config.mutable_experimental()->mutable_session_metadata();
model_metadata->set_name("toy_v1");
model_metadata->set_version(0);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
TF_ASSERT_OK(session_->Run({}, {},
{"init"}, nullptr));
inputs_.push_back(std::make_pair(
"input1", test::AsTensor<int32_t>({1, 1, 1}, TensorShape{1, 3})));
inputs_.push_back(std::make_pair(
"input2", test::AsTensor<int32_t>({2, 2, 2}, TensorShape{1, 3})));
inputs_.push_back(std::make_pair(
"input3", test::AsTensor<int32_t>({3, 3, 3}, TensorShape{1, 3})));
}
std::unique_ptr<Session> session_;
std::vector<std::pair<std::string, Tensor>> inputs_;
std::vector<std::string> output_tensor_names_{"result1", "result21",
"result31"};
std::vector<std::string> target_node_names_{"result22", "result32"};
};
TEST_F(TfrtSessionTest, NoTargetNodes) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_,
{}, &outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, RunOptions) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
auto* model_metadata =
options.config.mutable_experimental()->mutable_session_metadata();
model_metadata->set_name("toy_v1");
model_metadata->set_version(0);
auto session = absl::WrapUnique(NewSession(options));
ASSERT_TRUE(session != nullptr);
tensorflow::GraphDef graph_def;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
node: {
name: "input"
op: "Placeholder"
attr: {
key: "dtype"
value: { type: DT_INT32 }
}
}
node: {
name: "sleep_seconds"
op: "Const"
attr: {
key: "dtype"
value: { type: DT_INT32 }
}
attr: {
key: "value"
value: {
tensor: {
tensor_shape: {}
dtype: DT_INT32
int_val: 2
}
}
}
}
node: {
name: "sleep"
op: "SleepIdentityOp"
input: "sleep_seconds:0"
input: "input:0"
attr: {
key: "T"
value: { type: DT_INT32 }
}
})pb"
,
&graph_def));
TF_ASSERT_OK(session->Create(graph_def));
std::vector<Tensor> outputs;
RunMetadata run_metadata;
TF_ASSERT_OK(session->Run(
RunOptions{},
{{"input", test::AsTensor<int32_t>({1}, TensorShape{1})}},
{"sleep"},
{}, &outputs, &run_metadata));
ASSERT_EQ(outputs.size(), 1);
test::ExpectEqual(outputs[0], test::AsTensor<int32_t>({1}, TensorShape{1}));
RunOptions run_options;
run_options.set_timeout_in_ms(1);
auto status = session->Run(
run_options,
{{"input", test::AsTensor<int32_t>({1}, TensorShape{1})}},
{"sleep"},
{}, &outputs, &run_metadata);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(), ::testing::HasSubstr("Deadline exceeded"));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool intra_op_thread_pool("tf_intra",
1);
tfrt_stub::TfThreadPool inter_op_thread_pool(
"tf_inter",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = &inter_op_thread_pool,
.intra_op_threadpool = &intra_op_thread_pool};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions_OnlyInter) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool inter_op_thread_pool(
"tf_inter",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = &inter_op_thread_pool,
.intra_op_threadpool = nullptr};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions_OnlyIntra) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool intra_op_thread_pool("tf_intra",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = nullptr,
.intra_op_threadpool = &intra_op_thread_pool};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, RunInCallerThreadSessionOptions) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_inter_op_parallelism_threads(-1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(
{}, {}, {},
{"init"}, nullptr, &run_metadata));
}
TEST_F(TfrtSessionTest, RunInCallerThreadRunOptions) {
std::vector<Tensor> outputs;
RunOptions run_options;
run_options.set_inter_op_thread_pool(-1);
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(run_options, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, DeviceManager) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_inter_op_parallelism_threads(-1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
const DeviceMgr* device_manager;
TF_ASSERT_OK(session_->LocalDeviceManager(&device_manager));
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(
{}, {}, {},
{"init"}, nullptr, &run_metadata));
const DeviceMgr* device_manager_final;
TF_ASSERT_OK(session_->LocalDeviceManager(&device_manager_final));
ASSERT_EQ(device_manager, device_manager_final);
}
TEST_F(TfrtSessionTest, IntraOpThreadPoolOptionWarning) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_intra_op_parallelism_threads(1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
}
TEST_F(TfrtSessionTest, Callable) {
CallableOptions callable_options;
std::vector<Tensor> feed_tensors;
for (auto& input : inputs_) {
callable_options.add_feed(input.first);
feed_tensors.emplace_back(input.second);
}
for (auto& output : output_tensor_names_) {
callable_options.add_fetch(output);
}
for (auto& target : target_node_names_) {
callable_options.add_target(target);
}
Session::CallableHandle callable_handle;
TF_ASSERT_OK(session_->MakeCallable(callable_options, &callable_handle));
std::vector<Tensor> outputs;
RunMetadata run_metadata;
TF_ASSERT_OK(session_->RunCallable(callable_handle, feed_tensors, &outputs,
&run_metadata));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
TF_ASSERT_OK(session_->ReleaseCallable(callable_handle));
}
TEST_F(TfrtSessionTest, Finalize) { TF_ASSERT_OK(session_->Finalize()); }
TEST_F(TfrtSessionTest, WithTargetNodes) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_, target_node_names_,
&outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, CreateWithEmptyGraphIsNoop) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
TF_ASSERT_OK(session_->Create(GraphDef()));
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
}
TEST_F(TfrtSessionTest, CreateAgainError) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Create(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr(
"A Graph has already been created for this session."));
}
TEST_F(TfrtSessionTest, CreateAfterCloseError) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
TF_ASSERT_OK(session_->Close());
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Create(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, ExtendWhenNotCreated) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Extend(meta_graph_def.graph_def()));
TF_ASSERT_OK(session_->Run({}, {},
{"init"}, nullptr));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_,
{}, &outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ExtendAfterCreate) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.mutable_experimental()->set_disable_optimize_for_static_graph(
true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
TF_ASSERT_OK(session_->Create(graph_def));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(session_->Extend(extension));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", tensorflow::tfrt_stub::CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(tensorflow::tfrt_stub::GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(TfrtSessionTest, ExtendAfterCreate_ErrorWithStaticGraphOptimization) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.mutable_experimental()->set_optimize_for_static_graph(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
TF_ASSERT_OK(session_->Create(graph_def));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
auto status = session_->Extend(extension);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.ToString(),
::testing::HasSubstr("Extending the graph is not supported when"));
}
TEST_F(TfrtSessionTest, ExtendAfterCloseError) {
TF_ASSERT_OK(session_->Close());
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Extend(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, RunAfterCloseError) {
TF_ASSERT_OK(session_->Close());
std::vector<Tensor> outputs;
auto status = session_->Run(inputs_, output_tensor_names_,
{}, &outputs);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, InitializeTwiceCrashes) {
TfrtSessionOptions options;
auto second_initialize = [](TfrtSessionOptions options) {
auto status = InitializeTfrtSession(options);
TF_ASSERT_OK(status);
};
ASSERT_DEBUG_DEATH(second_initialize(options), "");
}
TEST_F(TfrtSessionTest, GetRuntime) {
auto runtime = TfrtSessionFactory::GetRuntime();
EXPECT_NE(runtime, nullptr);
}
TEST_F(TfrtSessionTest, RegisterTwiceCrashes) {
TfrtSessionFactory::RegisterInitializer(
[](tfrt_stub::Runtime*) { return absl::OkStatus(); });
ASSERT_DEBUG_DEATH(TfrtSessionFactory::RegisterInitializer(
[](tfrt_stub::Runtime*) { return absl::OkStatus(); }),
"");
}
}
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
testing::AddGlobalTestEnvironment(new tensorflow::TfrtSessionEnvironment());
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/tfrt_session/tfrt_session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/tfrt_session/tfrt_session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ec14302-5a69-4a3f-b8cd-f1cb4b4633c8 | cpp | abseil/abseil-cpp | usage | absl/flags/internal/usage.cc | absl/flags/internal/usage_test.cc | #include "absl/flags/internal/usage.h"
#include <stdint.h>
#include <algorithm>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <map>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/flags/commandlineflag.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/flag.h"
#include "absl/flags/internal/path_util.h"
#include "absl/flags/internal/private_handle_accessor.h"
#include "absl/flags/internal/program_name.h"
#include "absl/flags/internal/registry.h"
#include "absl/flags/usage_config.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/synchronization/mutex.h"
bool FLAGS_help = false;
bool FLAGS_helpfull = false;
bool FLAGS_helpshort = false;
bool FLAGS_helppackage = false;
bool FLAGS_version = false;
bool FLAGS_only_check_args = false;
bool FLAGS_helpon = false;
bool FLAGS_helpmatch = false;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
namespace {
using PerFlagFilter = std::function<bool(const absl::CommandLineFlag&)>;
constexpr size_t kHrfMaxLineLength = 80;
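// Renders <tag>txt</tag>, escaping XML special characters in txt.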
class XMLElement {
public:
XMLElement(absl::string_view tag, absl::string_view txt)
: tag_(tag), txt_(txt) {}
friend std::ostream& operator<<(std::ostream& out,
const XMLElement& xml_elem) {
out << "<" << xml_elem.tag_ << ">";
for (auto c : xml_elem.txt_) {
switch (c) {
case '"':
out << """;
break;
case '\'':
out << "'";
break;
case '&':
out << "&";
break;
case '<':
out << "<";
break;
case '>':
out << ">";
break;
case '\n':
case '\v':
case '\f':
case '\t':
out << " ";
break;
default:
if (IsValidXmlCharacter(static_cast<unsigned char>(c))) {
out << c;
}
break;
}
}
return out << "</" << xml_elem.tag_ << ">";
}
private:
static bool IsValidXmlCharacter(unsigned char c) { return c >= 0x20; }
absl::string_view tag_;
absl::string_view txt_;
};
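// Pretty-prints flag help text, wrapping long lines at max_line_len and
// indenting continuation lines.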
class FlagHelpPrettyPrinter {
public:
FlagHelpPrettyPrinter(size_t max_line_len, size_t min_line_len,
size_t wrapped_line_indent, std::ostream& out)
: out_(out),
max_line_len_(max_line_len),
min_line_len_(min_line_len),
wrapped_line_indent_(wrapped_line_indent),
line_len_(0),
first_line_(true) {}
void Write(absl::string_view str, bool wrap_line = false) {
if (str.empty()) return;
std::vector<absl::string_view> tokens;
if (wrap_line) {
for (auto line : absl::StrSplit(str, absl::ByAnyChar("\n\r"))) {
if (!tokens.empty()) {
tokens.emplace_back("\n");
}
for (auto token :
absl::StrSplit(line, absl::ByAnyChar(" \t"), absl::SkipEmpty())) {
tokens.push_back(token);
}
}
} else {
tokens.push_back(str);
}
for (auto token : tokens) {
bool new_line = (line_len_ == 0);
if (token == "\n") {
EndLine();
continue;
}
if (!new_line && (line_len_ + token.size() >= max_line_len_)) {
EndLine();
new_line = true;
}
if (new_line) {
StartLine();
} else {
out_ << ' ';
++line_len_;
}
out_ << token;
line_len_ += token.size();
}
}
void StartLine() {
if (first_line_) {
line_len_ = min_line_len_;
first_line_ = false;
} else {
line_len_ = min_line_len_ + wrapped_line_indent_;
}
out_ << std::string(line_len_, ' ');
}
void EndLine() {
out_ << '\n';
line_len_ = 0;
}
private:
std::ostream& out_;
const size_t max_line_len_;
const size_t min_line_len_;
const size_t wrapped_line_indent_;
size_t line_len_;
bool first_line_;
};
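// Emits human-readable help for one flag: name, help text, default value,
// and the current value when it differs from the default.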
void FlagHelpHumanReadable(const CommandLineFlag& flag, std::ostream& out) {
FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 4, 2, out);
printer.Write(absl::StrCat("--", flag.Name()));
printer.Write(absl::StrCat("(", flag.Help(), ");"), true);
std::string dflt_val = flag.DefaultValue();
std::string curr_val = flag.CurrentValue();
bool is_modified = curr_val != dflt_val;
if (flag.IsOfType<std::string>()) {
dflt_val = absl::StrCat("\"", dflt_val, "\"");
}
printer.Write(absl::StrCat("default: ", dflt_val, ";"));
if (is_modified) {
if (flag.IsOfType<std::string>()) {
curr_val = absl::StrCat("\"", curr_val, "\"");
}
printer.Write(absl::StrCat("currently: ", curr_val, ";"));
}
printer.EndLine();
}
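// Produces the full help output (human-readable or XML), grouping flags that
// pass filter_cb by package and file and sorting them by name within a file.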
void FlagsHelpImpl(std::ostream& out, PerFlagFilter filter_cb,
HelpFormat format, absl::string_view program_usage_message) {
if (format == HelpFormat::kHumanReadable) {
out << flags_internal::ShortProgramInvocationName() << ": "
<< program_usage_message << "\n\n";
} else {
out << "<?xml version=\"1.0\"?>\n"
<< "<!-- This output should be used with care. We do not report type "
"names for flags with user defined types -->\n"
<< "<!-- Prefer flag only_check_args for validating flag inputs -->\n"
<< "<AllFlags>\n"
<< XMLElement("program", flags_internal::ShortProgramInvocationName())
<< '\n'
<< XMLElement("usage", program_usage_message) << '\n';
}
std::map<std::string,
std::map<std::string, std::vector<const absl::CommandLineFlag*>>>
matching_flags;
flags_internal::ForEachFlag([&](absl::CommandLineFlag& flag) {
if (flag.IsRetired()) return;
if (flag.Help() == flags_internal::kStrippedFlagHelp) return;
if (!filter_cb(flag)) return;
std::string flag_filename = flag.Filename();
matching_flags[std::string(flags_internal::Package(flag_filename))]
[flag_filename]
.push_back(&flag);
});
absl::string_view package_separator;
absl::string_view file_separator;
for (auto& package : matching_flags) {
if (format == HelpFormat::kHumanReadable) {
out << package_separator;
package_separator = "\n\n";
}
file_separator = "";
for (auto& flags_in_file : package.second) {
if (format == HelpFormat::kHumanReadable) {
out << file_separator << " Flags from " << flags_in_file.first
<< ":\n";
file_separator = "\n";
}
std::sort(std::begin(flags_in_file.second),
std::end(flags_in_file.second),
[](const CommandLineFlag* lhs, const CommandLineFlag* rhs) {
return lhs->Name() < rhs->Name();
});
for (const auto* flag : flags_in_file.second) {
flags_internal::FlagHelp(out, *flag, format);
}
}
}
if (format == HelpFormat::kHumanReadable) {
FlagHelpPrettyPrinter printer(kHrfMaxLineLength, 0, 0, out);
if (filter_cb && matching_flags.empty()) {
printer.Write("No flags matched.\n", true);
}
printer.EndLine();
printer.Write(
"Try --helpfull to get a list of all flags or --help=substring "
"shows help for flags which include specified substring in either "
"in the name, or description or path.\n",
true);
} else {
out << "</AllFlags>\n";
}
}
void FlagsHelpImpl(std::ostream& out,
flags_internal::FlagKindFilter filename_filter_cb,
HelpFormat format, absl::string_view program_usage_message) {
FlagsHelpImpl(
out,
[&](const absl::CommandLineFlag& flag) {
return filename_filter_cb && filename_filter_cb(flag.Filename());
},
format, program_usage_message);
}
}
void FlagHelp(std::ostream& out, const CommandLineFlag& flag,
HelpFormat format) {
if (format == HelpFormat::kHumanReadable)
flags_internal::FlagHelpHumanReadable(flag, out);
}
void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format,
absl::string_view program_usage_message) {
flags_internal::FlagKindFilter filter_cb = [&](absl::string_view filename) {
return filter.empty() || absl::StrContains(filename, filter);
};
flags_internal::FlagsHelpImpl(out, filter_cb, format, program_usage_message);
}
HelpMode HandleUsageFlags(std::ostream& out,
absl::string_view program_usage_message) {
switch (GetFlagsHelpMode()) {
case HelpMode::kNone:
break;
case HelpMode::kImportant:
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_help_flags,
GetFlagsHelpFormat(), program_usage_message);
break;
case HelpMode::kShort:
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_helpshort_flags,
GetFlagsHelpFormat(), program_usage_message);
break;
case HelpMode::kFull:
flags_internal::FlagsHelp(out, "", GetFlagsHelpFormat(),
program_usage_message);
break;
case HelpMode::kPackage:
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_helppackage_flags,
GetFlagsHelpFormat(), program_usage_message);
break;
case HelpMode::kMatch: {
std::string substr = GetFlagsHelpMatchSubstr();
if (substr.empty()) {
flags_internal::FlagsHelp(out, substr, GetFlagsHelpFormat(),
program_usage_message);
} else {
auto filter_cb = [&substr](const absl::CommandLineFlag& flag) {
if (absl::StrContains(flag.Name(), substr)) return true;
if (absl::StrContains(flag.Filename(), substr)) return true;
if (absl::StrContains(flag.Help(), substr)) return true;
return false;
};
flags_internal::FlagsHelpImpl(
out, filter_cb, HelpFormat::kHumanReadable, program_usage_message);
}
break;
}
case HelpMode::kVersion:
if (flags_internal::GetUsageConfig().version_string)
out << flags_internal::GetUsageConfig().version_string();
break;
case HelpMode::kOnlyCheckArgs:
break;
}
return GetFlagsHelpMode();
}
namespace {
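// Mutable help-reporting state; all accesses go through
// help_attributes_guard.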
ABSL_CONST_INIT absl::Mutex help_attributes_guard(absl::kConstInit);
ABSL_CONST_INIT std::string* match_substr
ABSL_GUARDED_BY(help_attributes_guard) = nullptr;
ABSL_CONST_INIT HelpMode help_mode ABSL_GUARDED_BY(help_attributes_guard) =
HelpMode::kNone;
ABSL_CONST_INIT HelpFormat help_format ABSL_GUARDED_BY(help_attributes_guard) =
HelpFormat::kHumanReadable;
}
std::string GetFlagsHelpMatchSubstr() {
absl::MutexLock l(&help_attributes_guard);
if (match_substr == nullptr) return "";
return *match_substr;
}
void SetFlagsHelpMatchSubstr(absl::string_view substr) {
absl::MutexLock l(&help_attributes_guard);
if (match_substr == nullptr) match_substr = new std::string;
match_substr->assign(substr.data(), substr.size());
}
HelpMode GetFlagsHelpMode() {
absl::MutexLock l(&help_attributes_guard);
return help_mode;
}
void SetFlagsHelpMode(HelpMode mode) {
absl::MutexLock l(&help_attributes_guard);
help_mode = mode;
}
HelpFormat GetFlagsHelpFormat() {
absl::MutexLock l(&help_attributes_guard);
return help_format;
}
void SetFlagsHelpFormat(HelpFormat format) {
absl::MutexLock l(&help_attributes_guard);
help_format = format;
}
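// Recognizes the usage-reporting flags (--help, --help=<substr>, --helpmatch,
// --helpon, --helpfull, --helpshort, --helppackage, --version,
// --only_check_args) and records the corresponding help mode. Returns true
// if the flag was consumed.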
bool DeduceUsageFlags(absl::string_view name, absl::string_view value) {
if (absl::ConsumePrefix(&name, "help")) {
if (name.empty()) {
if (value.empty()) {
SetFlagsHelpMode(HelpMode::kImportant);
} else {
SetFlagsHelpMode(HelpMode::kMatch);
SetFlagsHelpMatchSubstr(value);
}
return true;
}
if (name == "match") {
SetFlagsHelpMode(HelpMode::kMatch);
SetFlagsHelpMatchSubstr(value);
return true;
}
if (name == "on") {
SetFlagsHelpMode(HelpMode::kMatch);
SetFlagsHelpMatchSubstr(absl::StrCat("/", value, "."));
return true;
}
if (name == "full") {
SetFlagsHelpMode(HelpMode::kFull);
return true;
}
if (name == "short") {
SetFlagsHelpMode(HelpMode::kShort);
return true;
}
if (name == "package") {
SetFlagsHelpMode(HelpMode::kPackage);
return true;
}
return false;
}
if (name == "version") {
SetFlagsHelpMode(HelpMode::kVersion);
return true;
}
if (name == "only_check_args") {
SetFlagsHelpMode(HelpMode::kOnlyCheckArgs);
return true;
}
return false;
}
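// Exits according to the selected help mode: success for --version and
// --only_check_args, failure for the other help modes, no exit for kNone.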
void MaybeExit(HelpMode mode) {
switch (mode) {
case flags_internal::HelpMode::kNone:
return;
case flags_internal::HelpMode::kOnlyCheckArgs:
case flags_internal::HelpMode::kVersion:
std::exit(0);
default:
std::exit(1);
}
}
}
ABSL_NAMESPACE_END
} | #include "absl/flags/internal/usage.h"
#include <stdint.h>
#include <sstream>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/flags/config.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/parse.h"
#include "absl/flags/internal/program_name.h"
#include "absl/flags/reflection.h"
#include "absl/flags/usage.h"
#include "absl/flags/usage_config.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
ABSL_FLAG(int, usage_reporting_test_flag_01, 101,
"usage_reporting_test_flag_01 help message");
ABSL_FLAG(bool, usage_reporting_test_flag_02, false,
"usage_reporting_test_flag_02 help message");
ABSL_FLAG(double, usage_reporting_test_flag_03, 1.03,
"usage_reporting_test_flag_03 help message");
ABSL_FLAG(int64_t, usage_reporting_test_flag_04, 1000000000000004L,
"usage_reporting_test_flag_04 help message");
ABSL_FLAG(std::string, usage_reporting_test_flag_07, "\r\n\f\v\a\b\t ",
"usage_reporting_test_flag_07 help \r\n\f\v\a\b\t ");
static const char kTestUsageMessage[] = "Custom usage message";
struct UDT {
UDT() = default;
UDT(const UDT&) = default;
UDT& operator=(const UDT&) = default;
};
static bool AbslParseFlag(absl::string_view, UDT*, std::string*) {
return true;
}
static std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; }
ABSL_FLAG(UDT, usage_reporting_test_flag_05, {},
"usage_reporting_test_flag_05 help message");
ABSL_FLAG(
std::string, usage_reporting_test_flag_06, {},
"usage_reporting_test_flag_06 help message.\n"
"\n"
"Some more help.\n"
"Even more long long long long long long long long long long long long "
"help message.");
namespace {
namespace flags = absl::flags_internal;
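// Normalizes source file names to start at "absl/" (using forward slashes on
// Windows) so the golden help output is stable across build roots.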
static std::string NormalizeFileName(absl::string_view fname) {
#ifdef _WIN32
std::string normalized(fname);
std::replace(normalized.begin(), normalized.end(), '\\', '/');
fname = normalized;
#endif
auto absl_pos = fname.rfind("absl/");
if (absl_pos != absl::string_view::npos) {
fname = fname.substr(absl_pos);
}
return std::string(fname);
}
class UsageReportingTest : public testing::Test {
protected:
UsageReportingTest() {
absl::FlagsUsageConfig default_config;
default_config.normalize_filename = &NormalizeFileName;
absl::SetFlagsUsageConfig(default_config);
}
~UsageReportingTest() override {
flags::SetFlagsHelpMode(flags::HelpMode::kNone);
flags::SetFlagsHelpMatchSubstr("");
flags::SetFlagsHelpFormat(flags::HelpFormat::kHumanReadable);
}
void SetUp() override {
#if ABSL_FLAGS_STRIP_NAMES
GTEST_SKIP() << "This test requires flag names to be present";
#endif
}
private:
absl::FlagSaver flag_saver_;
};
using UsageReportingDeathTest = UsageReportingTest;
TEST_F(UsageReportingDeathTest, TestSetProgramUsageMessage) {
#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL
EXPECT_EQ(absl::ProgramUsageMessage(), kTestUsageMessage);
#else
EXPECT_THAT(absl::ProgramUsageMessage(),
::testing::HasSubstr(
"This program contains tests written using Google Test"));
#endif
EXPECT_DEATH_IF_SUPPORTED(
absl::SetProgramUsageMessage("custom usage message"),
::testing::HasSubstr("SetProgramUsageMessage() called twice"));
}
TEST_F(UsageReportingTest, TestFlagHelpHRF_on_flag_01) {
const auto* flag = absl::FindCommandLineFlag("usage_reporting_test_flag_01");
std::stringstream test_buf;
flags::FlagHelp(test_buf, *flag, flags::HelpFormat::kHumanReadable);
EXPECT_EQ(
test_buf.str(),
R"( --usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
default: 101;
)");
}
TEST_F(UsageReportingTest, TestFlagHelpHRF_on_flag_02) {
const auto* flag = absl::FindCommandLineFlag("usage_reporting_test_flag_02");
std::stringstream test_buf;
flags::FlagHelp(test_buf, *flag, flags::HelpFormat::kHumanReadable);
EXPECT_EQ(
test_buf.str(),
R"( --usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
default: false;
)");
}
TEST_F(UsageReportingTest, TestFlagHelpHRF_on_flag_03) {
const auto* flag = absl::FindCommandLineFlag("usage_reporting_test_flag_03");
std::stringstream test_buf;
flags::FlagHelp(test_buf, *flag, flags::HelpFormat::kHumanReadable);
EXPECT_EQ(
test_buf.str(),
R"( --usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
default: 1.03;
)");
}
TEST_F(UsageReportingTest, TestFlagHelpHRF_on_flag_04) {
const auto* flag = absl::FindCommandLineFlag("usage_reporting_test_flag_04");
std::stringstream test_buf;
flags::FlagHelp(test_buf, *flag, flags::HelpFormat::kHumanReadable);
EXPECT_EQ(
test_buf.str(),
R"( --usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
default: 1000000000000004;
)");
}
TEST_F(UsageReportingTest, TestFlagHelpHRF_on_flag_05) {
const auto* flag = absl::FindCommandLineFlag("usage_reporting_test_flag_05");
std::stringstream test_buf;
flags::FlagHelp(test_buf, *flag, flags::HelpFormat::kHumanReadable);
EXPECT_EQ(
test_buf.str(),
R"( --usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
default: UDT{};
)");
}
TEST_F(UsageReportingTest, TestFlagsHelpHRF) {
std::string usage_test_flags_out =
R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
default: 101;
--usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
default: false;
--usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
default: 1.03;
--usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
default: 1000000000000004;
--usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
default: UDT{};
--usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
Some more help.
Even more long long long long long long long long long long long long help
message.); default: "";)"
"\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
"help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)";
std::stringstream test_buf_01;
flags::FlagsHelp(test_buf_01, "usage_test.cc",
flags::HelpFormat::kHumanReadable, kTestUsageMessage);
EXPECT_EQ(test_buf_01.str(), usage_test_flags_out);
std::stringstream test_buf_02;
flags::FlagsHelp(test_buf_02, "flags/internal/usage_test.cc",
flags::HelpFormat::kHumanReadable, kTestUsageMessage);
EXPECT_EQ(test_buf_02.str(), usage_test_flags_out);
std::stringstream test_buf_03;
flags::FlagsHelp(test_buf_03, "usage_test", flags::HelpFormat::kHumanReadable,
kTestUsageMessage);
EXPECT_EQ(test_buf_03.str(), usage_test_flags_out);
std::stringstream test_buf_04;
flags::FlagsHelp(test_buf_04, "flags/invalid_file_name.cc",
flags::HelpFormat::kHumanReadable, kTestUsageMessage);
EXPECT_EQ(test_buf_04.str(),
R"(usage_test: Custom usage message
No flags matched.
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
std::stringstream test_buf_05;
flags::FlagsHelp(test_buf_05, "", flags::HelpFormat::kHumanReadable,
kTestUsageMessage);
std::string test_out = test_buf_05.str();
absl::string_view test_out_str(test_out);
EXPECT_TRUE(
absl::StartsWith(test_out_str, "usage_test: Custom usage message"));
EXPECT_TRUE(absl::StrContains(
test_out_str, "Flags from absl/flags/internal/usage_test.cc:"));
EXPECT_TRUE(
absl::StrContains(test_out_str, "-usage_reporting_test_flag_01 "));
}
TEST_F(UsageReportingTest, TestNoUsageFlags) {
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kNone);
}
TEST_F(UsageReportingTest, TestUsageFlag_helpshort) {
flags::SetFlagsHelpMode(flags::HelpMode::kShort);
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kShort);
EXPECT_EQ(
test_buf.str(),
R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
default: 101;
--usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
default: false;
--usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
default: 1.03;
--usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
default: 1000000000000004;
--usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
default: UDT{};
--usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
Some more help.
Even more long long long long long long long long long long long long help
message.); default: "";)"
"\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
"help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
}
TEST_F(UsageReportingTest, TestUsageFlag_help_simple) {
flags::SetFlagsHelpMode(flags::HelpMode::kImportant);
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kImportant);
EXPECT_EQ(
test_buf.str(),
R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
default: 101;
--usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
default: false;
--usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
default: 1.03;
--usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
default: 1000000000000004;
--usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
default: UDT{};
--usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
Some more help.
Even more long long long long long long long long long long long long help
message.); default: "";)"
"\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
"help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
}
TEST_F(UsageReportingTest, TestUsageFlag_help_one_flag) {
flags::SetFlagsHelpMode(flags::HelpMode::kMatch);
flags::SetFlagsHelpMatchSubstr("usage_reporting_test_flag_06");
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kMatch);
EXPECT_EQ(test_buf.str(),
R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
Some more help.
Even more long long long long long long long long long long long long help
message.); default: "";
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
}
TEST_F(UsageReportingTest, TestUsageFlag_help_multiple_flag) {
flags::SetFlagsHelpMode(flags::HelpMode::kMatch);
flags::SetFlagsHelpMatchSubstr("test_flag");
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kMatch);
EXPECT_EQ(
test_buf.str(),
R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
default: 101;
--usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
default: false;
--usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
default: 1.03;
--usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
default: 1000000000000004;
--usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
default: UDT{};
--usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
Some more help.
Even more long long long long long long long long long long long long help
message.); default: "";)"
"\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
"help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
}
TEST_F(UsageReportingTest, TestUsageFlag_helppackage) {
flags::SetFlagsHelpMode(flags::HelpMode::kPackage);
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kPackage);
EXPECT_EQ(
test_buf.str(),
R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
default: 101;
--usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
default: false;
--usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
default: 1.03;
--usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
default: 1000000000000004;
--usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
default: UDT{};
--usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
Some more help.
Even more long long long long long long long long long long long long help
message.); default: "";)"
"\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
"help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
}
TEST_F(UsageReportingTest, TestUsageFlag_version) {
flags::SetFlagsHelpMode(flags::HelpMode::kVersion);
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kVersion);
#ifndef NDEBUG
EXPECT_EQ(test_buf.str(), "usage_test\nDebug build (NDEBUG not #defined)\n");
#else
EXPECT_EQ(test_buf.str(), "usage_test\n");
#endif
}
TEST_F(UsageReportingTest, TestUsageFlag_only_check_args) {
flags::SetFlagsHelpMode(flags::HelpMode::kOnlyCheckArgs);
std::stringstream test_buf;
EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
flags::HelpMode::kOnlyCheckArgs);
EXPECT_EQ(test_buf.str(), "");
}
TEST_F(UsageReportingTest, TestUsageFlag_helpon) {
flags::SetFlagsHelpMode(flags::HelpMode::kMatch);
flags::SetFlagsHelpMatchSubstr("/bla-bla.");
std::stringstream test_buf_01;
EXPECT_EQ(flags::HandleUsageFlags(test_buf_01, kTestUsageMessage),
flags::HelpMode::kMatch);
EXPECT_EQ(test_buf_01.str(),
R"(usage_test: Custom usage message
No flags matched.
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
flags::SetFlagsHelpMatchSubstr("/usage_test.");
std::stringstream test_buf_02;
EXPECT_EQ(flags::HandleUsageFlags(test_buf_02, kTestUsageMessage),
flags::HelpMode::kMatch);
EXPECT_EQ(
test_buf_02.str(),
R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
default: 101;
--usage_reporting_test_flag_02 (usage_reporting_test_flag_02 help message);
default: false;
--usage_reporting_test_flag_03 (usage_reporting_test_flag_03 help message);
default: 1.03;
--usage_reporting_test_flag_04 (usage_reporting_test_flag_04 help message);
default: 1000000000000004;
--usage_reporting_test_flag_05 (usage_reporting_test_flag_05 help message);
default: UDT{};
--usage_reporting_test_flag_06 (usage_reporting_test_flag_06 help message.
Some more help.
Even more long long long long long long long long long long long long help
message.); default: "";)"
"\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
"help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
)");
}
}
int main(int argc, char* argv[]) {
(void)absl::GetFlag(FLAGS_undefok);
flags::SetProgramInvocationName("usage_test");
#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL
absl::SetProgramUsageMessage(kTestUsageMessage);
#endif
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/internal/usage.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/internal/usage_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
eacdd4d3-8136-4b25-b2d6-03ae16e8f6e5 | cpp | tensorflow/tensorflow | conv_algorithm_picker | third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker.cc | third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker_test.cc | #include "xla/service/gpu/autotuning/conv_algorithm_picker.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/autotuning/gpu_autotuning.pb.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/hlo_algorithm_denylist.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/lazy_op_runner.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/util/env_var.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#if CUDNN_VERSION >= 90000
#include "third_party/gpus/cudnn/cudnn_ops.h"
#else
#include "third_party/gpus/cudnn/cudnn_ops_infer.h"
#endif
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
namespace {
using se::DeviceMemoryBase;
using se::dnn::AlgorithmDesc;
using std::optional;
Shape MaybeTupleElementShape(Shape shape, int64_t tuple_idx) {
if (shape.IsTuple()) {
return shape.tuple_shapes(tuple_idx);
} else {
return shape;
}
}
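// Scratch allocator for DNN workspace memory during autotuning; the limit
// defaults to 4 GiB and can be overridden via TF_CUDNN_WORKSPACE_LIMIT_IN_MB.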
class ScratchAllocator : public se::ScratchAllocator {
public:
ScratchAllocator(int device_ordinal,
se::DeviceMemoryAllocator* memory_allocator)
: device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}
int64_t GetMemoryLimitInBytes() override {
return ScratchAllocator::GetDefaultMemoryLimitInBytes();
}
int64_t TotalAllocatedBytes() { return total_allocated_bytes_; }
static int64_t GetDefaultMemoryLimitInBytes() {
int64_t value;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar("TF_CUDNN_WORKSPACE_LIMIT_IN_MB",
1LL << 12, &value));
return value * (1LL << 20);
}
absl::StatusOr<se::DeviceMemory<uint8_t>> AllocateBytes(
int64_t byte_size) override;
template <typename T>
absl::StatusOr<se::DeviceMemory<T>> Allocate(int64_t num_elements) {
TF_ASSIGN_OR_RETURN(se::DeviceMemory<uint8_t> bytes,
AllocateBytes(num_elements * sizeof(T)));
return se::DeviceMemory<T>(bytes);
}
private:
const int device_ordinal_;
se::DeviceMemoryAllocator* memory_allocator_;
std::vector<se::OwningDeviceMemory> allocated_buffers_;
int64_t total_allocated_bytes_ = 0;
};
absl::StatusOr<se::DeviceMemory<uint8_t>> ScratchAllocator::AllocateBytes(
int64_t byte_size) {
CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
TF_ASSIGN_OR_RETURN(se::OwningDeviceMemory allocated_buffer,
memory_allocator_->Allocate(device_ordinal_, byte_size,
false));
total_allocated_bytes_ += byte_size;
se::DeviceMemoryBase buffer_addr = *allocated_buffer;
allocated_buffers_.push_back(std::move(allocated_buffer));
return se::DeviceMemory<uint8_t>(buffer_addr);
}
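// Enumerates candidate convolution runners from the DNN backend for the
// given convolution kind (plain forward/backward, graph, or fused forward).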
absl::StatusOr<std::vector<GenericConvRunner>> GetAlgorithms(
const GpuConvConfig& config, se::Stream* stream, bool use_cudnn_frontend,
bool use_fallback, const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config.output_type));
se::StreamExecutor* stream_exec = stream->parent();
std::vector<GenericConvRunner> result;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
switch (kind) {
default:
return Internal("Unknown ConvolutionKind %d", kind);
case se::dnn::ConvolutionKind::FORWARD_BIAS_ACTIVATION: {
if (!config.fusion) {
return Internal(
"GpuConvConfig had fusion ConvolutionKind but no FusionConfig.");
}
std::vector<std::unique_ptr<const se::dnn::FusedConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetFusedConvolveRunners(
use_cudnn_frontend,
se::dnn::ConvolutionKind::FORWARD, input_type,
BiasTypeForInputType(input_type), output_type,
config.conv_result_scale,
config.fusion->side_input_scale,
config.fusion->leakyrelu_alpha, stream,
config.input_descriptor, config.filter_descriptor,
config.bias_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, config.fusion->mode, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::FusedConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD_GRAPH: {
std::vector<std::unique_ptr<const se::dnn::GraphConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetGraphConvolveRunners(
kind, input_type, output_type, stream, config.input_descriptor,
config.filter_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, numeric_options, &runners, config.serialized_graph));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::GraphConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD:
case se::dnn::ConvolutionKind::BACKWARD_DATA:
case se::dnn::ConvolutionKind::BACKWARD_FILTER: {
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
use_cudnn_frontend, kind, input_type, output_type, stream,
config.input_descriptor,
DeviceMemoryBase(nullptr),
config.filter_descriptor,
DeviceMemoryBase(nullptr),
config.output_descriptor,
DeviceMemoryBase(nullptr), config.conv_desc,
use_fallback, nullptr, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::ConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
}
return result;
}
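// ROCm path: collects MIOpen convolution runners for the instruction, using
// the pre-allocated operand and result buffers.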
absl::StatusOr<std::vector<std::unique_ptr<const se::dnn::ConvRunner>>>
GetMIOpenAlgorithms(const HloCustomCallInstruction* instr,
absl::Span<se::DeviceMemoryBase> operand_buffers,
absl::Span<se::DeviceMemoryBase> result_buffers,
se::StreamExecutor* stream_exec,
ScratchAllocator* scratch_allocator, se::Stream* stream,
const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(GpuConvConfig config, GetGpuConvConfig(instr));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType dtype,
GetDNNDataTypeFromPrimitiveType(config.output_type));
TF_ASSIGN_OR_RETURN(
GpuConvParams params,
GetGpuConvParams(config, operand_buffers, result_buffers));
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
false, kind, dtype, dtype, stream,
params.config->input_descriptor, params.input_buf,
params.config->filter_descriptor, params.filter_buf,
params.config->output_descriptor, params.output_buf,
params.config->conv_desc,
false, scratch_allocator, numeric_options,
&runners));
return runners;
}
std::string NumBytesToString(int64_t bytes) {
return absl::StrCat(tsl::strings::HumanReadableNumBytes(bytes), " (", bytes,
"B)");
}
CudnnVersion GetCudnnVersion(se::StreamExecutor* stream_executor) {
se::dnn::VersionInfo version = GetDnnVersionInfoOrDefault(stream_executor);
CudnnVersion cudnn_version;
cudnn_version.set_major(version.major_version());
cudnn_version.set_minor(version.minor_version());
cudnn_version.set_patch(version.patch());
return cudnn_version;
}
ComputeCapability GetComputeCapability(se::StreamExecutor* stream_executor) {
ComputeCapability cc;
se::CudaComputeCapability se_cc =
stream_executor->GetDeviceDescription().cuda_compute_capability();
cc.set_major(se_cc.major);
cc.set_minor(se_cc.minor);
return cc;
}
void PrintPlatformInfo(const se::Stream* stream) {
auto* se = stream->parent();
const auto& desc = se->GetDeviceDescription();
LOG(ERROR) << "Device: " << desc.name();
LOG(ERROR) << "Platform: " << desc.platform_version();
LOG(ERROR) << "Driver: " << desc.driver_version();
LOG(ERROR) << "Runtime: " << desc.runtime_version();
auto dnn_version = GetDnnVersionInfo(se);
if (dnn_version.ok()) {
auto v = dnn_version.value();
LOG(ERROR) << "cudnn version: " << v.major_version() << "."
<< v.minor_version() << "." << v.patch();
}
}
absl::StatusOr<bool> CheckRedzones(const se::RedzoneAllocator& allocator,
se::Stream* stream, absl::string_view name,
std::string_view instr_str,
AutotuneResult* result) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("CudnnConvAlgorithmPicker checking redzones",
2);
using RedzoneCheckStatus = se::RedzoneAllocator::RedzoneCheckStatus;
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus redzone_check,
allocator.CheckRedzones());
if (redzone_check.ok()) {
return true;
}
auto* fail = result->mutable_failure();
fail->set_kind(AutotuneResult::REDZONE_MODIFIED);
*fail->mutable_msg() = redzone_check.RedzoneFailureMsg();
fail->set_buffer_address(
reinterpret_cast<uint64_t>(redzone_check.user_buffer_address));
LOG(ERROR) << absl::StreamFormat(
"Detected cudnn out-of-bounds write in conv %s buffer! This is likely a "
"cudnn bug. We will skip this algorithm in the future, but your GPU "
"state may already be corrupted, leading to incorrect results. Within "
"Google, no action is needed on your part. Outside of Google, please "
"ensure you're running the latest version of cudnn. If that doesn't fix "
"the problem, please file a bug with this full error message and we'll "
"contact nvidia.",
name);
LOG(ERROR) << redzone_check.RedzoneFailureMsg();
LOG(ERROR) << "HloInstruction " << instr_str;
PrintPlatformInfo(stream);
return false;
}
}
bool ShouldInitConvData(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 2;
}
bool ShouldCheckConv(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 4;
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithm(
const HloCustomCallInstruction* instr) {
return AutotunerUtil::Autotune(
instr, config_, [&] { return PickBestAlgorithmNoCache(instr); });
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr) {
if (config_.IsDeviceless()) {
AutotuneResult result;
result.mutable_algorithm()->set_algo_id(-1);
return result;
}
se::StreamExecutor* stream_exec = config_.GetExecutor();
absl::MutexLock lock(&GetGpuMutex(stream_exec));
if (!stream_exec->SynchronizeAllActivity()) {
return Internal(
"Failed to synchronize GPU for autotuning conv instruction");
}
absl::StatusOr<AutotuneResult> result_or(Internal("Unknown platform."));
se::Platform::Id platform_id = stream_exec->GetPlatform()->id();
if (platform_id == se::rocm::kROCmPlatformId) {
result_or = PickBestAlgorithmNoCacheRocm(instr);
} else if (platform_id == se::cuda::kCudaPlatformId) {
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
result_or = PickBestAlgorithmNoCacheCuda(instr);
#endif
}
return result_or;
}
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
absl::StatusOr<GpuConvAlgorithmPicker::AutotuneRuntimeArguments>
GpuConvAlgorithmPicker::AutotuneRuntimeArguments::FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options) {
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*instr, config, debug_options,
RedzoneBuffers::kAllInputsOutputsNoScratch));
std::string canonical_hlo(
AutotuneCacheKey(config.GetExecutor()->GetDeviceDescription(), *instr)
.GetHlo());
TF_ASSIGN_OR_RETURN(GpuConvConfig gpu_conv_config, GetGpuConvConfig(instr));
GpuConvAlgorithmPicker::AutotuneRuntimeArguments runtime_arguments = {
instr->GetModule()->config(),
std::move(rz_buffers),
std::move(gpu_conv_config),
{canonical_hlo}};
return runtime_arguments;
}
struct CudnnVersionRange {
using TupleVersion = std::tuple<int, int, int>;
TupleVersion begin;
TupleVersion end;
bool IsInRange(const CudnnVersion& other) const {
TupleVersion other_version{other.major(), other.minor(), other.patch()};
return begin <= other_version && other_version < end;
}
CudnnVersionRange(const CudnnVersion& begin, const CudnnVersion& end)
: begin(begin.major(), begin.minor(), begin.patch()),
end(end.major(), end.minor(), end.patch()) {}
CudnnVersionRange(const TupleVersion& begin, const TupleVersion& end)
: begin(begin), end(end) {}
};
struct ComputeCapabilityRange {
using TupleComputeCapability = std::tuple<int, int>;
TupleComputeCapability begin;
TupleComputeCapability end;
bool IsInRange(const ComputeCapability& other) const {
TupleComputeCapability other_cc{other.major(), other.minor()};
return begin <= other_cc && other_cc < end;
}
};
struct DisabledAlgorithm {
CudnnVersionRange cudnn_version_range;
ComputeCapabilityRange compute_capability_range;
int algo_id;
};
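// Algorithms skipped unconditionally as known-buggy: cuDNN [9.0.0, 10.0.0)
// engine 14 on compute capabilities [6.0, 8.0).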
static const DisabledAlgorithm kDisabledAlgorithms[] = {
{{{9, 0, 0}, {10, 0, 0}},
{{6, 0}, {8, 0}},
14}};
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::AutotuneOneConvRunner(
GenericConvRunner* const runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments) {
auto alg = runner->ToAlgorithmDesc();
se::StreamExecutor* stream_exec = config_.GetExecutor();
XLA_SCOPED_LOGGING_TIMER_LEVEL(
absl::StrCat("CudnnConvAlgorithmPicker::PickBestAlgorithm algo ",
alg.ToString()),
2);
auto make_failure = [&alg](AutotuneResult::FailureKind kind,
absl::string_view msg) {
AutotuneResult result;
*result.mutable_algorithm() = alg.ToProto();
result.mutable_failure()->set_kind(kind);
result.mutable_failure()->set_msg( msg.data(), msg.size());
return result;
};
AlgorithmDesc alg_key(alg.algo_id(), alg.tensor_ops_enabled(), std::nullopt);
std::string instr_str = instruction_info.has_value()
? std::string(instruction_info->GetHlo())
: "<unknown>";
for (const auto& disabled_algo : kDisabledAlgorithms) {
if (disabled_algo.cudnn_version_range.IsInRange(
GetCudnnVersion(stream_exec)) &&
disabled_algo.compute_capability_range.IsInRange(
GetComputeCapability(stream_exec)) &&
disabled_algo.algo_id == alg.algo_id()) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
}
if (absl::c_linear_search(disabled_algos, alg_key)) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
GpuConvConfig config = runtime_arguments.gpu_conv_config;
auto activation_mode =
config.fusion ? config.fusion->mode : se::dnn::ActivationMode::kNone;
if (!alg.is_cudnn_frontend() &&
config.kind == CudnnConvKind::kForwardActivation &&
activation_mode == se::dnn::ActivationMode::kNone &&
alg.algo_id() != CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) {
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for implicit RELU.");
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator scratch_allocator,
AutotunerUtil::CreateRedzoneAllocator(
config_, runtime_arguments.hlo_module_config.debug_options()));
se::dnn::ProfileResult profile_result;
VLOG(4) << "Trying algorithm " << alg.ToString() << " for " << instr_str;
SlowOperationAlarm alarm(absl::Seconds(1), [&] {
return absl::StrFormat(
"Trying algorithm %s for conv %s is taking a while...", alg.ToString(),
instr_str);
});
std::optional<size_t> workspace_size =
runner->ToAlgorithmDesc().workspace_size();
if (!workspace_size) {
return make_failure(AutotuneResult::UNKNOWN,
"Internal error: missing workspace size from "
"OpRunner::ToAlgorithmDesc()");
}
auto scratch_or = scratch_allocator.AllocateBytes(*workspace_size);
if (!scratch_or.ok()) {
return make_failure(AutotuneResult::DISQUALIFIED,
absl::StrCat("Scratch allocation failed: ",
scratch_or.status().ToString()));
}
se::DeviceMemoryBase scratch_memory = scratch_or.value();
RunConvOptions options;
options.runner_cache = runner;
float max_time = 0;
float min_time = std::numeric_limits<float>::max();
absl::Status launch_status;
std::vector<se::DeviceMemoryBase> operand_buffers =
runtime_arguments.rz_buffers.input_buffers();
std::vector<se::DeviceMemoryBase> result_buffers =
runtime_arguments.rz_buffers.output_buffers();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
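// The first run is an unprofiled warm-up; profiling then repeats until the
// measured time stabilizes within 5% or kMaxIter iterations are reached.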
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
options.profile_result = &profile_result;
profile_result.set_warmup_run_executed(true);
constexpr int kMaxIter = 10;
int num_iters = 0;
for (; num_iters < kMaxIter && launch_status.ok(); ++num_iters) {
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
if (!profile_result.is_valid()) {
break;
}
float old_min_time = min_time;
min_time = std::min(min_time, profile_result.elapsed_time_in_ms());
max_time = std::max(max_time, profile_result.elapsed_time_in_ms());
constexpr float kThreshold = 0.05f;
if (std::abs(profile_result.elapsed_time_in_ms() - old_min_time) /
old_min_time <
kThreshold) {
break;
}
}
if (!launch_status.ok()) {
VLOG(5) << "Launch failed: " << launch_status;
return make_failure(
AutotuneResult::DISQUALIFIED,
absl::StrCat("Profiling failure on cuDNN engine ", alg.ToString(), ": ",
launch_status.ToString()));
}
if (!profile_result.is_valid()) {
VLOG(5) << "Launch succeeded but profile result is invalid.";
return make_failure(
AutotuneResult::UNKNOWN,
absl::StrCat("Launch succeeded but profile result is invalid, "
"with cuDNN engine ",
alg.ToString(), ": ", launch_status.ToString()));
}
VLOG(4) << "Best time: " << min_time << " ms. Worst time: " << max_time
<< " ms. Total iterations: " << num_iters;
int64_t scratch_bytes_used =
scratch_allocator.TotalAllocatedBytesExcludingRedzones();
AutotuneResult result;
*result.mutable_algorithm() = alg.ToProto();
result.set_scratch_bytes(scratch_bytes_used);
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Milliseconds(min_time));
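// When conv checking is enabled (autotune level >= 4), verify the redzones
// for out-of-bounds writes and compare this algorithm's output against the
// first successful (reference) algorithm's output.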
if (!ShouldCheckConv(runtime_arguments.hlo_module_config)) {
if (!reference_result->has_value()) {
(*reference_result) = {
alg, std::vector<DeviceMemoryBase>(result_buffers.size())};
}
return result;
}
TF_ASSIGN_OR_RETURN(
bool input_output_allocator_redzone_clear,
CheckRedzones(runtime_arguments.rz_buffers.RedzoneAllocator(), stream,
"input/output", instr_str, &result));
TF_ASSIGN_OR_RETURN(
bool scratch_allocator_redzone_clear,
CheckRedzones(scratch_allocator, stream, "scratch", instr_str, &result));
if (!input_output_allocator_redzone_clear ||
!scratch_allocator_redzone_clear) {
if (runtime_arguments.canonical_hlo.has_value()) {
std::string canonical_hlo = runtime_arguments.canonical_hlo.value();
std::string blas_version;
if (auto* blas = stream_exec->AsBlas()) {
(void)blas->GetVersion(&blas_version);
}
AlgorithmDenylist proto;
auto entry = proto.add_entries();
entry->set_hlo(canonical_hlo);
*entry->mutable_cc() = GetComputeCapability(stream_exec);
*entry->mutable_cudnn_version() = GetCudnnVersion(stream_exec);
entry->set_blas_version(blas_version);
auto algo = entry->add_algos();
algo->set_id(alg.algo_id());
algo->set_tensor_ops(alg.tensor_ops_enabled());
LOG(ERROR) << "To denylist this algorithm for this convolution, "
"copy-paste the following "
"proto to the denylist file pointed by XLA_FLAGS "
"--xla_gpu_algorithm_denylist_path="
<< GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path()
<< " : " << proto.ShortDebugString();
}
return result;
}
if (reference_result->has_value()) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("BufferComparator::CompareEqual", 2);
const DebugOptions& debug_options =
runtime_arguments.hlo_module_config.debug_options();
for (int i = 0; i < result_buffers.size(); ++i) {
Shape output_shape = MaybeTupleElementShape(
runtime_arguments.rz_buffers.output_shape(), i);
XLA_SCOPED_LOGGING_TIMER_LEVEL("BufferComparator::CompareEqual", 2);
BufferComparator comparator(output_shape,
debug_options.xla_gpu_autotune_gemm_rtol());
absl::StatusOr<bool> compare_result = comparator.CompareEqual(
stream, (*reference_result)->buffers[i], result_buffers[i]);
if (!compare_result.ok()) {
LOG(ERROR) << "Unable to compare "
<< (*reference_result)->algorithm.ToString() << " against "
<< alg.ToString() << " for " << instr_str << ": "
<< compare_result.status();
if (compare_result.status().code() ==
absl::StatusCode::kResourceExhausted) {
return compare_result.status();
}
CHECK(!debug_options.xla_gpu_crash_on_verification_failures());
} else if (!compare_result.value()) {
LOG(ERROR)
<< "Results mismatch between different convolution algorithms. "
"This is likely a bug/unexpected loss of precision in cudnn.\n"
<< instr_str << " for " << (*reference_result)->algorithm.ToString()
<< " vs " << alg.ToString();
PrintPlatformInfo(stream);
if (instruction_info.has_value()) {
VLOG(2) << "Full module on failure: \n"
<< instruction_info->GetModelStr();
}
auto* fail = result.mutable_failure();
fail->set_kind(AutotuneResult::WRONG_RESULT);
fail->set_buffer_address(
reinterpret_cast<uint64_t>(result_buffers[i].opaque()));
*fail->mutable_reference_algorithm() =
(*reference_result)->algorithm.ToProto();
}
}
} else {
XLA_SCOPED_LOGGING_TIMER_LEVEL("Memcpy Reference Result", 2);
std::vector<DeviceMemoryBase> reference_result_buffers(
result_buffers.size());
for (int i = 0; i < result_buffers.size(); ++i) {
TF_ASSIGN_OR_RETURN(
reference_result_buffers[i],
runtime_arguments.rz_buffers.RedzoneAllocator().AllocateBytes(
result_buffers[i].size()));
TF_RETURN_IF_ERROR(stream->Memcpy(&reference_result_buffers[i],
result_buffers[i],
result_buffers[i].size()));
}
(*reference_result) = {alg, reference_result_buffers};
}
return result;
}
absl::StatusOr<AutotuneResult>
GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheCuda(
const HloCustomCallInstruction* instr) {
AutotuneCacheKey instruction_info{config_.GetModelStr(), *instr};
std::string instr_str(instruction_info.GetHlo());
XLA_SCOPED_LOGGING_TIMER(absl::StrCat(
"GpuConvAlgorithmPicker::PickBestAlgorithmImpl for ", instr_str));
const DebugOptions& debug_options =
instr->GetModule()->config().debug_options();
const bool crash_on_checking_failure =
debug_options.xla_gpu_crash_on_verification_failures();
std::string blas_version;
se::StreamExecutor* stream_exec = config_.GetExecutor();
if (auto* blas = stream_exec->AsBlas()) {
(void)blas->GetVersion(&blas_version);
}
std::vector<AlgorithmDesc> disabled_algos;
TF_ASSIGN_OR_RETURN(
AutotuneRuntimeArguments runtime_arguments,
AutotuneRuntimeArguments::FromInstruction(instr, config_, debug_options));
if (runtime_arguments.canonical_hlo.has_value()) {
disabled_algos = GetDisabledConvAlgorithms(
GetComputeCapability(stream_exec), GetCudnnVersion(stream_exec),
blas_version, runtime_arguments.canonical_hlo.value());
}
const bool cudnn_frontend_enabled =
debug_options.xla_gpu_enable_cudnn_frontend();
bool allow_tf32 = true;
if (instr) {
allow_tf32 = absl::c_all_of(
instr->precision_config().operand_precision(),
[](int precision) { return precision <= PrecisionConfig::HIGH; });
}
const se::NumericOptions numeric_options{
RequireDeterminism(instr->GetModule()->config()), allow_tf32};
std::optional<ReferenceResult> reference_result;
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
TF_ASSIGN_OR_RETURN(
std::vector<GenericConvRunner> runners,
GetAlgorithms(runtime_arguments.gpu_conv_config, stream,
cudnn_frontend_enabled,
false, numeric_options));
std::vector<AutotuneResult> profile_results;
for (auto& runner_cache : runners) {
TF_ASSIGN_OR_RETURN(
auto result,
AutotuneOneConvRunner(&runner_cache, &reference_result, disabled_algos,
instruction_info, runtime_arguments));
profile_results.emplace_back(std::move(result));
}
if (!reference_result) {
LOG(WARNING) << "None of the algorithms provided by cuDNN heuristics "
"worked; trying fallback algorithms.";
if (runtime_arguments.canonical_hlo.has_value()) {
LOG(WARNING) << "Conv: " << runtime_arguments.canonical_hlo.value();
}
TF_ASSIGN_OR_RETURN(
std::vector<GenericConvRunner> fallback_runners,
GetAlgorithms(runtime_arguments.gpu_conv_config, stream,
cudnn_frontend_enabled,
true, numeric_options));
for (auto& runner_cache : fallback_runners) {
TF_ASSIGN_OR_RETURN(
auto result, AutotuneOneConvRunner(&runner_cache, &reference_result,
disabled_algos, instruction_info,
runtime_arguments));
profile_results.emplace_back(std::move(result));
}
}
if (instr) {
AutotuningLog log;
{
ConvInstructionLog instr_log;
*instr_log.mutable_instruction() = instr->ToProto();
for (int i = 0; i < instr->operand_count(); i++) {
*instr_log.add_operand_shapes() = instr->operand(i)->shape().ToProto();
instr_log.add_operand_addresses(reinterpret_cast<uint64_t>(
runtime_arguments.rz_buffers.input_buffers()[i].opaque()));
}
for (se::DeviceMemoryBase result_buffer :
runtime_arguments.rz_buffers.output_buffers()) {
instr_log.add_result_addresses(
reinterpret_cast<uint64_t>(result_buffer.opaque()));
}
log.mutable_instr()->PackFrom(instr_log);
}
for (const auto& profile : profile_results) {
*log.add_results() = profile;
}
*log.mutable_compute_capability() = GetComputeCapability(stream_exec);
*log.mutable_cudnn_version() = GetCudnnVersion(stream_exec);
log.set_device_pci_bus_id(stream_exec->GetDeviceDescription().pci_bus_id());
log.set_blas_version(blas_version);
VLOG(2) << "Autotuning result: " << log.ShortDebugString();
if (crash_on_checking_failure) {
for (const auto& profile : profile_results) {
if (profile.has_failure() &&
profile.failure().kind() != AutotuneResult::DISQUALIFIED) {
LOG(FATAL) << "crash_on_checking_failure encountered errors:\n\n"
<< log.DebugString();
}
}
}
}
TF_ASSIGN_OR_RETURN(AutotuneResult selected_algorithm,
PickBestResult(profile_results, instr_str,
runtime_arguments.hlo_module_config));
return selected_algorithm;
}
#endif
absl::StatusOr<AutotuneResult>
GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheRocm(
const HloCustomCallInstruction* instr) {
XLA_SCOPED_LOGGING_TIMER(absl::StrCat(
"GpuConvAlgorithmPicker::PickBestAlgorithmImpl for ", instr->ToString()));
const bool allow_tf32 = absl::c_all_of(
instr->precision_config().operand_precision(),
[](int precision) { return precision <= PrecisionConfig::HIGH; });
const se::NumericOptions numeric_options{
RequireDeterminism(instr->GetModule()->config()), allow_tf32};
se::StreamExecutor* stream_exec = config_.GetExecutor();
const auto device_ordinal = stream_exec->device_ordinal();
std::vector<se::DeviceMemoryBase> operand_buffers;
se::DeviceMemoryAllocator* allocator = config_.GetAllocator();
ScratchAllocator input_output_allocator(device_ordinal, allocator);
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
const auto initialize_buffer = [stream](DeviceMemoryBase buffer) {
return stream->MemZero(&buffer, buffer.size());
};
for (const auto* operand : instr->operands()) {
TF_ASSIGN_OR_RETURN(auto buffer,
input_output_allocator.AllocateBytes(
ShapeUtil::ByteSizeOf(operand->shape())));
TF_RETURN_IF_ERROR(initialize_buffer(buffer));
operand_buffers.push_back(buffer);
}
std::vector<se::DeviceMemoryBase> result_buffers(
instr->shape().tuple_shapes_size());
if (instr->shape().IsTuple()) {
for (int i = 0; i < instr->shape().tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(
result_buffers[i],
input_output_allocator.AllocateBytes(
ShapeUtil::ByteSizeOf(instr->shape().tuple_shapes(i))));
TF_RETURN_IF_ERROR(initialize_buffer(result_buffers[i]));
}
} else {
TF_ASSIGN_OR_RETURN(
result_buffers[0],
input_output_allocator.AllocateBytes(
ShapeUtil::ByteSizeOf(instr->shape().tuple_shapes(0))));
TF_RETURN_IF_ERROR(initialize_buffer(result_buffers[0]));
}
ScratchAllocator scratch_allocator(device_ordinal, allocator);
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners,
GetMIOpenAlgorithms(instr, absl::MakeSpan(operand_buffers),
absl::MakeSpan(result_buffers), stream_exec,
&scratch_allocator, stream, numeric_options));
std::vector<AutotuneResult> profile_results;
if (runners.size() == 1) {
TF_ASSIGN_OR_RETURN(auto alg, runners[0]->ToAlgorithmDesc());
auto algorithm_proto = alg.ToProto();
profile_results.emplace_back();
auto& result = profile_results.back();
*result.mutable_algorithm() = algorithm_proto;
result.set_scratch_bytes(runners[0]->GetWorkspaceSize());
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Milliseconds(-1));
} else {
TF_ASSIGN_OR_RETURN(GpuConvConfig config, GetGpuConvConfig(instr));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(auto alg, runner->ToAlgorithmDesc());
XLA_SCOPED_LOGGING_TIMER_LEVEL(
absl::StrCat("CudnnConvAlgorithmPicker::PickBestAlgorithm algo ",
alg.ToString()),
2);
se::dnn::ProfileResult profile_result;
VLOG(4) << "Trying algorithm " << alg.ToString() << " for "
<< instr->ToString();
TF_ASSIGN_OR_RETURN(
DeviceMemoryBase scratch_memory,
scratch_allocator.AllocateBytes(runner->GetWorkspaceSize()));
TF_ASSIGN_OR_RETURN(auto lazy_runner,
se::dnn::LazyOpRunner<se::dnn::ConvOp>::FromOpRunner(
std::move(runner)));
GenericConvRunner runner_cache(std::move(lazy_runner));
RunConvOptions options;
options.profile_result = &profile_result;
options.runner_cache = &runner_cache;
absl::Status launch_status =
RunGpuConv(config, absl::MakeSpan(operand_buffers), result_buffers,
scratch_memory, stream, options);
if (!launch_status.ok()) {
continue;
}
if (!profile_result.is_valid()) {
continue;
}
profile_results.emplace_back();
AutotuneResult& result = profile_results.back();
*result.mutable_algorithm() = alg.ToProto();
int64_t scratch_bytes_used = scratch_allocator.TotalAllocatedBytes();
result.set_scratch_bytes(scratch_bytes_used);
*result.mutable_run_time() = tsl::proto_utils::ToDurationProto(
absl::Milliseconds(profile_result.elapsed_time_in_ms()));
}
}
TF_ASSIGN_OR_RETURN(AutotuneResult selected_algorithm,
PickBestResult(profile_results, instr->ToString(),
instr->GetModule()->config()));
return selected_algorithm;
}
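// Autotunes a single conv custom call and rewrites it in place: the call is
// cloned with the scratch element of its result tuple resized to the chosen
// algorithm's workspace, and the winning algorithm is recorded in the
// CudnnConvBackendConfig. On failure the pass either aborts (strict mode)
// or warns and keeps the default algorithm.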
absl::StatusOr<bool> GpuConvAlgorithmPicker::RunOnInstruction(
HloInstruction* instr) {
CHECK(IsCustomCallToDnnConvolution(*instr));
const bool strict = instr->parent()
->parent()
->config()
.debug_options()
.xla_gpu_strict_conv_algorithm_picker();
absl::StatusOr<AutotuneResult> best_algo_or =
PickBestAlgorithm(Cast<HloCustomCallInstruction>(instr));
if (!best_algo_or.ok()) {
auto msg = absl::StrFormat(
"Failed to determine best cudnn convolution algorithm for:\n%s\n\n"
"Original error: %s",
instr->ToString(), best_algo_or.status().ToString());
if (strict) {
return Unknown(
"%s\n\nTo ignore this failure and try to use a fallback algorithm "
"(which may have suboptimal performance), use "
"XLA_FLAGS=--xla_gpu_strict_conv_algorithm_picker=false. Please "
"also file a bug for the root cause of failing autotuning.",
msg);
}
LOG(WARNING)
<< msg << "\n\nAs a result, convolution performance may be suboptimal.";
return false;
}
auto best_algo = std::move(best_algo_or).value();
VLOG(3) << "Setting cudnn conv to use algorithm "
<< best_algo.conv().algorithm() << " and "
<< NumBytesToString(best_algo.scratch_bytes())
<< " of scratch memory: " << instr->ToString()
<< " tensor_ops_enabled: " << best_algo.conv().tensor_ops_enabled();
HloComputation* computation = instr->parent();
std::vector<Shape> new_call_element_shapes;
new_call_element_shapes.reserve(instr->shape().tuple_shapes_size() - 1);
for (int i = 0; i < instr->shape().tuple_shapes_size() - 1; ++i) {
new_call_element_shapes.emplace_back(instr->shape().tuple_shapes(i));
}
new_call_element_shapes.emplace_back(
ShapeUtil::MakeShape(U8, {best_algo.scratch_bytes()}));
Shape new_call_shape = ShapeUtil::MakeTupleShape(new_call_element_shapes);
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_backend_config,
instr->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& backend_config =
*gpu_backend_config.mutable_cudnn_conv_backend_config();
*backend_config.mutable_algorithm() = best_algo.algorithm();
backend_config.mutable_algorithm()->mutable_workspace_size()->set_value(
best_algo.scratch_bytes());
HloInstruction* new_call = computation->AddInstruction(
instr->CloneWithNewOperands(new_call_shape, instr->operands()));
new_call->SetAndSanitizeName(instr->name());
VLOG(3) << "Replacing convolution " << instr->ToString() << " with "
<< new_call->ToString();
TF_RETURN_IF_ERROR(new_call->set_backend_config(gpu_backend_config));
std::vector<HloInstruction*> new_tuple_elements;
new_tuple_elements.reserve(new_call->shape().tuple_shapes_size() - 1);
for (int i = 0; i < new_call->shape().tuple_shapes_size() - 1; ++i) {
new_tuple_elements.emplace_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_call->shape().tuple_shapes(i), new_call, i)));
}
new_tuple_elements.emplace_back(computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<uint8_t>({}))));
HloInstruction* new_tuple = computation->AddInstruction(
HloInstruction::CreateTuple(new_tuple_elements));
TF_RETURN_IF_ERROR(instr->parent()->ReplaceInstruction(instr, new_tuple));
return true;
}
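// Candidate convolutions are collected up front because RunOnInstruction
// replaces instructions, which would invalidate iteration over the
// computation's instruction list.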
absl::StatusOr<bool> GpuConvAlgorithmPicker::RunOnComputation(
HloComputation* computation) {
std::vector<HloInstruction*> convs;
for (HloInstruction* instr : computation->instructions()) {
if (IsCandidate(instr)) {
convs.push_back(instr);
}
}
bool changed = false;
for (HloInstruction* instr : convs) {
TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(instr));
changed |= result;
}
return changed;
}
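// Pass entry point: autotunes conv custom calls in every non-fusion
// computation unless convolution autotuning is disabled for this module.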
absl::StatusOr<bool> GpuConvAlgorithmPicker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_SCOPED_LOGGING_TIMER(
absl::StrCat("GpuConvAlgorithmPicker for ", module->name()));
if (!IsEnabled(module)) {
VLOG(3) << "Convolution auto-tuning disabled, GpuConvAlgorithmPicker "
"returning early.";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/autotuning/conv_algorithm_picker.h"
#include <cstdint>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/transforms/conv_rewriter.h"
#include "xla/service/gpu/transforms/cudnn_fused_conv_rewriter.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/platform_util.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GpuConvAlgorithmPickerTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
GpuConvAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
};
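// End-to-end check that cached autotune results are honored: the result of a
// first run is serialized, its scratch size is perturbed, and a second run
// on a fresh module must rewrite the conv with the perturbed scratch size.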
TEST_F(GpuConvAlgorithmPickerTest, SetAlgorithm) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[3,56,56,16]{2,1,0,3} parameter(0)
%arg1 = f32[3,3,3,64]{2,1,0,3} parameter(1)
ROOT %conv = f32[54,54,16,64]{1,0,3,2} convolution(%arg0, %arg1), window={size=3x3}, dim_labels=f01b_i01o->01bf
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
const se::GpuComputeCapability& cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(ConvRewriter(cc), m.get()));
changed = false;
DebugOptions opts = DefaultDebugOptionsIgnoringFlags();
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_scratch_bytes = result.scratch_bytes();
int64_t new_scratch_bytes = old_scratch_bytes + 1;
result.set_scratch_bytes(new_scratch_bytes);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(ConvRewriter(cc), m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK(RunHloPass(TupleSimplifier(), m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&conv))));
EXPECT_THAT(
conv->shape(),
GmockMatch(m::Shape().WithSubshape(
{1}, m::Shape().WithElementType(U8).WithDims({new_scratch_bytes}))));
TF_ASSERT_OK_AND_ASSIGN(auto dnn_version, GetDnnVersionInfo(stream_exec));
if (dnn_version.major_version() >= 9 && dnn_version.major_version() < 10 &&
std::holds_alternative<stream_executor::CudaComputeCapability>(cc) &&
std::get<stream_executor::CudaComputeCapability>(cc).major == 7 &&
std::get<stream_executor::CudaComputeCapability>(cc).minor == 0) {
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->has_cudnn_conv_backend_config() &&
conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.algorithm()
.algo_id() != 14);
}
}
TEST_F(GpuConvAlgorithmPickerTest, SetAlgorithmGraphConvF8) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP() << "FP8 convolutions require Hopper or newer architecture.";
}
constexpr absl::string_view kHlo = R"(
HloModule module
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
ENTRY main {
input = f8e4m3fn[1,6,6,128] parameter(0)
filter = f8e4m3fn[16,3,3,128] parameter(1)
input_scale = f32[] parameter(2)
input_scale_bcast = f32[1,6,6,128] broadcast(input_scale), dimensions={}
filter_scale = f32[] parameter(3)
filter_scale_bcast = f32[16,3,3,128] broadcast(filter_scale), dimensions={}
input_f32 = f32[1,6,6,128] convert(input)
input_unscaled = f32[1,6,6,128] multiply(input_f32, input_scale_bcast)
filter_f32 = f32[16,3,3,128] convert(filter)
filter_unscaled = f32[16,3,3,128] multiply(filter_f32, filter_scale_bcast)
conv_a = f32[1,6,6,16] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_o01i->b01f, feature_group_count=1
z_scale = f32[] parameter(4)
z_scale_bcast = f32[1,6,6,16] broadcast(z_scale), dimensions={}
conv_a_scaled = f32[1,6,6,16] multiply(conv_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,6,6,16] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,6,6,16] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,6,6,16] clamp(c1_bcast, conv_a_scaled, c2_bcast)
conv_a_clamped_f8 = f8e4m3fn[1,6,6,16] convert(conv_a_clamped)
abs_conv_a = f32[1,6,6,16] abs(conv_a)
c0 = f32[] constant(-inf)
amax = f32[] reduce(abs_conv_a, c0), dimensions={0,1,2,3}, to_apply=apply
ROOT conv_f8 = (f8e4m3fn[1,6,6,16], f32[]) tuple(conv_a_clamped_f8, amax)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
const se::GpuComputeCapability& cc = GetCudaComputeCapability();
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(ConvRewriter(cc), m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(CudnnFusedConvRewriter(
GetCudaComputeCapability(), GetDnnVersion(),
stream_exec->GetDeviceDescription().runtime_version()),
m.get()));
ASSERT_TRUE(changed);
DebugOptions opts = DefaultDebugOptionsIgnoringFlags();
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90e4c0d3-86b4-4bd3-b131-1140cb425e65 | cpp | google/cel-cpp | optional_or_step | eval/eval/optional_or_step.cc | eval/eval/optional_or_step_test.cc | #include "eval/eval/optional_or_step.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "common/casting.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "eval/eval/jump_step.h"
#include "internal/status_macros.h"
#include "runtime/internal/errors.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::As;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::OptionalValue;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::runtime_internal::CreateNoMatchingOverloadError;
enum class OptionalOrKind { kOrOptional, kOrValue };
ErrorValue MakeNoOverloadError(OptionalOrKind kind) {
switch (kind) {
case OptionalOrKind::kOrOptional:
return ErrorValue(CreateNoMatchingOverloadError("or"));
case OptionalOrKind::kOrValue:
return ErrorValue(CreateNoMatchingOverloadError("orValue"));
}
ABSL_UNREACHABLE();
}
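// Jump step used by the flat (stack-machine) plan for optional.or() and
// optional.orValue(): if the top of stack is a present optional, or an
// error/unknown that must propagate, jump over the alternative branch. For
// orValue() the contained value replaces the optional before jumping.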
class OptionalHasValueJumpStep final : public JumpStepBase {
public:
OptionalHasValueJumpStep(int64_t expr_id, OptionalOrKind kind)
: JumpStepBase({}, expr_id), kind_(kind) {}
absl::Status Evaluate(ExecutionFrame* frame) const override {
if (!frame->value_stack().HasEnough(1)) {
return absl::Status(absl::StatusCode::kInternal, "Value stack underflow");
}
const auto& value = frame->value_stack().Peek();
auto optional_value = As<OptionalValue>(value);
const bool should_jump =
(optional_value.has_value() && optional_value->HasValue()) ||
(!optional_value.has_value() && (cel::InstanceOf<ErrorValue>(value) ||
cel::InstanceOf<UnknownValue>(value)));
if (should_jump) {
if (kind_ == OptionalOrKind::kOrValue && optional_value.has_value()) {
frame->value_stack().PopAndPush(optional_value->Value());
}
return Jump(frame);
}
return absl::OkStatus();
}
private:
const OptionalOrKind kind_;
};
class OptionalOrStep : public ExpressionStepBase {
public:
explicit OptionalOrStep(int64_t expr_id, OptionalOrKind kind)
: ExpressionStepBase(expr_id), kind_(kind) {}
absl::Status Evaluate(ExecutionFrame* frame) const override;
private:
const OptionalOrKind kind_;
};
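// Shared semantics once both operands are materialized:
//  - an error or unknown on the left propagates unchanged;
//  - a non-optional left value yields a no-matching-overload error;
//  - a present left optional wins (unwrapped for orValue);
//  - otherwise the right operand is taken, and for or() it must itself be an
//    optional (or error/unknown), else a no-matching-overload error results.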
absl::Status EvalOptionalOr(OptionalOrKind kind, const Value& lhs,
const Value& rhs, const AttributeTrail& lhs_attr,
const AttributeTrail& rhs_attr, Value& result,
AttributeTrail& result_attr) {
if (InstanceOf<ErrorValue>(lhs) || InstanceOf<UnknownValue>(lhs)) {
result = lhs;
result_attr = lhs_attr;
return absl::OkStatus();
}
auto lhs_optional_value = As<OptionalValue>(lhs);
if (!lhs_optional_value.has_value()) {
result = MakeNoOverloadError(kind);
result_attr = AttributeTrail();
return absl::OkStatus();
}
if (lhs_optional_value->HasValue()) {
if (kind == OptionalOrKind::kOrValue) {
result = lhs_optional_value->Value();
} else {
result = lhs;
}
result_attr = lhs_attr;
return absl::OkStatus();
}
if (kind == OptionalOrKind::kOrOptional && !InstanceOf<ErrorValue>(rhs) &&
!InstanceOf<UnknownValue>(rhs) && !InstanceOf<OptionalValue>(rhs)) {
result = MakeNoOverloadError(kind);
result_attr = AttributeTrail();
return absl::OkStatus();
}
result = rhs;
result_attr = rhs_attr;
return absl::OkStatus();
}
absl::Status OptionalOrStep::Evaluate(ExecutionFrame* frame) const {
if (!frame->value_stack().HasEnough(2)) {
return absl::InternalError("Value stack underflow");
}
absl::Span<const Value> args = frame->value_stack().GetSpan(2);
absl::Span<const AttributeTrail> args_attr =
frame->value_stack().GetAttributeSpan(2);
Value result;
AttributeTrail result_attr;
CEL_RETURN_IF_ERROR(EvalOptionalOr(kind_, args[0], args[1], args_attr[0],
args_attr[1], result, result_attr));
frame->value_stack().PopAndPush(2, std::move(result), std::move(result_attr));
return absl::OkStatus();
}
class ExhaustiveDirectOptionalOrStep : public DirectExpressionStep {
public:
ExhaustiveDirectOptionalOrStep(
int64_t expr_id, std::unique_ptr<DirectExpressionStep> optional,
std::unique_ptr<DirectExpressionStep> alternative, OptionalOrKind kind)
: DirectExpressionStep(expr_id),
kind_(kind),
optional_(std::move(optional)),
alternative_(std::move(alternative)) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute) const override;
private:
OptionalOrKind kind_;
std::unique_ptr<DirectExpressionStep> optional_;
std::unique_ptr<DirectExpressionStep> alternative_;
};
absl::Status ExhaustiveDirectOptionalOrStep::Evaluate(
ExecutionFrameBase& frame, Value& result, AttributeTrail& attribute) const {
CEL_RETURN_IF_ERROR(optional_->Evaluate(frame, result, attribute));
Value rhs;
AttributeTrail rhs_attr;
CEL_RETURN_IF_ERROR(alternative_->Evaluate(frame, rhs, rhs_attr));
CEL_RETURN_IF_ERROR(EvalOptionalOr(kind_, result, rhs, attribute, rhs_attr,
result, attribute));
return absl::OkStatus();
}
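// Short-circuiting variant: the alternative subexpression is evaluated only
// when the left operand is an empty optional.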
class DirectOptionalOrStep : public DirectExpressionStep {
public:
DirectOptionalOrStep(int64_t expr_id,
std::unique_ptr<DirectExpressionStep> optional,
std::unique_ptr<DirectExpressionStep> alternative,
OptionalOrKind kind)
: DirectExpressionStep(expr_id),
kind_(kind),
optional_(std::move(optional)),
alternative_(std::move(alternative)) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute) const override;
private:
OptionalOrKind kind_;
std::unique_ptr<DirectExpressionStep> optional_;
std::unique_ptr<DirectExpressionStep> alternative_;
};
absl::Status DirectOptionalOrStep::Evaluate(ExecutionFrameBase& frame,
Value& result,
AttributeTrail& attribute) const {
CEL_RETURN_IF_ERROR(optional_->Evaluate(frame, result, attribute));
if (InstanceOf<UnknownValue>(result) || InstanceOf<ErrorValue>(result)) {
return absl::OkStatus();
}
auto optional_value = As<OptionalValue>(static_cast<const Value&>(result));
if (!optional_value.has_value()) {
result = MakeNoOverloadError(kind_);
return absl::OkStatus();
}
if (optional_value->HasValue()) {
if (kind_ == OptionalOrKind::kOrValue) {
result = optional_value->Value();
}
return absl::OkStatus();
}
CEL_RETURN_IF_ERROR(alternative_->Evaluate(frame, result, attribute));
if (kind_ == OptionalOrKind::kOrOptional) {
if (!InstanceOf<OptionalValue>(result) && !InstanceOf<ErrorValue>(result) &&
!InstanceOf<UnknownValue>(result)) {
result = MakeNoOverloadError(kind_);
}
}
return absl::OkStatus();
}
}
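// Factory functions used by the planner. `short_circuiting` selects between
// the lazy and exhaustive direct-step implementations above.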
absl::StatusOr<std::unique_ptr<JumpStepBase>> CreateOptionalHasValueJumpStep(
bool or_value, int64_t expr_id) {
return std::make_unique<OptionalHasValueJumpStep>(
expr_id,
or_value ? OptionalOrKind::kOrValue : OptionalOrKind::kOrOptional);
}
std::unique_ptr<ExpressionStep> CreateOptionalOrStep(bool is_or_value,
int64_t expr_id) {
return std::make_unique<OptionalOrStep>(
expr_id,
is_or_value ? OptionalOrKind::kOrValue : OptionalOrKind::kOrOptional);
}
std::unique_ptr<DirectExpressionStep> CreateDirectOptionalOrStep(
int64_t expr_id, std::unique_ptr<DirectExpressionStep> optional,
std::unique_ptr<DirectExpressionStep> alternative, bool is_or_value,
bool short_circuiting) {
auto kind =
is_or_value ? OptionalOrKind::kOrValue : OptionalOrKind::kOrOptional;
if (short_circuiting) {
return std::make_unique<DirectOptionalOrStep>(expr_id, std::move(optional),
std::move(alternative), kind);
} else {
return std::make_unique<ExhaustiveDirectOptionalOrStep>(
expr_id, std::move(optional), std::move(alternative), kind);
}
}
} | #include "eval/eval/optional_or_step.h"
#include <memory>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "common/value_testing.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/const_value_step.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "internal/testing.h"
#include "runtime/activation.h"
#include "runtime/internal/errors.h"
#include "runtime/managed_value_factory.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::Activation;
using ::cel::As;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::IntValue;
using ::cel::ManagedValueFactory;
using ::cel::MemoryManagerRef;
using ::cel::OptionalValue;
using ::cel::RuntimeOptions;
using ::cel::TypeReflector;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::ValueKind;
using ::cel::test::ErrorValueIs;
using ::cel::test::IntValueIs;
using ::cel::test::OptionalValueIs;
using ::cel::test::ValueKindIs;
using ::testing::HasSubstr;
using ::testing::NiceMock;
class MockDirectStep : public DirectExpressionStep {
public:
MOCK_METHOD(absl::Status, Evaluate,
(ExecutionFrameBase & frame, Value& result,
AttributeTrail& scratch),
(const, override));
};
std::unique_ptr<DirectExpressionStep> MockNeverCalledDirectStep() {
auto* mock = new NiceMock<MockDirectStep>();
EXPECT_CALL(*mock, Evaluate).Times(0);
return absl::WrapUnique(mock);
}
std::unique_ptr<DirectExpressionStep> MockExpectCallDirectStep() {
auto* mock = new NiceMock<MockDirectStep>();
EXPECT_CALL(*mock, Evaluate)
.Times(1)
.WillRepeatedly(
[](ExecutionFrameBase& frame, Value& result, AttributeTrail& attr) {
result = ErrorValue(absl::InternalError("expected to be unused"));
return absl::OkStatus();
});
return absl::WrapUnique(mock);
}
class OptionalOrTest : public testing::Test {
public:
OptionalOrTest()
: value_factory_(TypeReflector::Builtin(),
MemoryManagerRef::ReferenceCounting()) {}
protected:
ManagedValueFactory value_factory_;
Activation empty_activation_;
};
TEST_F(OptionalOrTest, OptionalOrLeftPresentShortcutRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1,
CreateConstValueDirectStep(OptionalValue::Of(
value_factory_.get().GetMemoryManager(), IntValue(42))),
MockNeverCalledDirectStep(),
false,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, OptionalValueIs(IntValueIs(42)));
}
TEST_F(OptionalOrTest, OptionalOrLeftErrorShortcutsRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1,
CreateConstValueDirectStep(ErrorValue(absl::InternalError("error"))),
MockNeverCalledDirectStep(),
false,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, ValueKindIs(ValueKind::kError));
}
TEST_F(OptionalOrTest, OptionalOrLeftErrorExhaustiveRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1,
CreateConstValueDirectStep(ErrorValue(absl::InternalError("error"))),
MockExpectCallDirectStep(),
false,
false);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, ValueKindIs(ValueKind::kError));
}
TEST_F(OptionalOrTest, OptionalOrLeftUnknownShortcutsRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(UnknownValue()),
MockNeverCalledDirectStep(),
false,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, ValueKindIs(ValueKind::kUnknown));
}
TEST_F(OptionalOrTest, OptionalOrLeftUnknownExhaustiveRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(UnknownValue()),
MockExpectCallDirectStep(),
false,
false);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, ValueKindIs(ValueKind::kUnknown));
}
TEST_F(OptionalOrTest, OptionalOrLeftAbsentReturnRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(OptionalValue::None()),
CreateConstValueDirectStep(OptionalValue::Of(
value_factory_.get().GetMemoryManager(), IntValue(42))),
false,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, OptionalValueIs(IntValueIs(42)));
}
TEST_F(OptionalOrTest, OptionalOrLeftWrongType) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(IntValue(42)),
MockNeverCalledDirectStep(),
false,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result,
ErrorValueIs(StatusIs(
absl::StatusCode::kUnknown,
HasSubstr(cel::runtime_internal::kErrNoMatchingOverload))));
}
TEST_F(OptionalOrTest, OptionalOrRightWrongType) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(OptionalValue::None()),
CreateConstValueDirectStep(IntValue(42)),
false,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result,
ErrorValueIs(StatusIs(
absl::StatusCode::kUnknown,
HasSubstr(cel::runtime_internal::kErrNoMatchingOverload))));
}
TEST_F(OptionalOrTest, OptionalOrValueLeftPresentShortcutRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1,
CreateConstValueDirectStep(OptionalValue::Of(
value_factory_.get().GetMemoryManager(), IntValue(42))),
MockNeverCalledDirectStep(),
true,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, IntValueIs(42));
}
TEST_F(OptionalOrTest, OptionalOrValueLeftPresentExhaustiveRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1,
CreateConstValueDirectStep(OptionalValue::Of(
value_factory_.get().GetMemoryManager(), IntValue(42))),
MockExpectCallDirectStep(),
true,
false);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, IntValueIs(42));
}
TEST_F(OptionalOrTest, OptionalOrValueLeftErrorShortcutsRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1,
CreateConstValueDirectStep(ErrorValue(absl::InternalError("error"))),
MockNeverCalledDirectStep(),
true,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, ValueKindIs(ValueKind::kError));
}
TEST_F(OptionalOrTest, OptionalOrValueLeftUnknownShortcutsRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(UnknownValue()),
MockNeverCalledDirectStep(), true, true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, ValueKindIs(ValueKind::kUnknown));
}
TEST_F(OptionalOrTest, OptionalOrValueLeftAbsentReturnRight) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(OptionalValue::None()),
CreateConstValueDirectStep(IntValue(42)),
true,
true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result, IntValueIs(42));
}
TEST_F(OptionalOrTest, OptionalOrValueLeftWrongType) {
RuntimeOptions options;
ExecutionFrameBase frame(empty_activation_, options, value_factory_.get());
std::unique_ptr<DirectExpressionStep> step = CreateDirectOptionalOrStep(
-1, CreateConstValueDirectStep(IntValue(42)),
MockNeverCalledDirectStep(), true, true);
Value result;
AttributeTrail scratch;
ASSERT_OK(step->Evaluate(frame, result, scratch));
EXPECT_THAT(result,
ErrorValueIs(StatusIs(
absl::StatusCode::kUnknown,
HasSubstr(cel::runtime_internal::kErrNoMatchingOverload))));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/optional_or_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/optional_or_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
5e55870b-179b-4c43-afcc-e62f77d5156c | cpp | tensorflow/tensorflow | embedding_lookup_sparse | tensorflow/lite/kernels/embedding_lookup_sparse.cc | tensorflow/lite/kernels/embedding_lookup_sparse_test.cc | #include <stdint.h>
#include <algorithm>
#include <cmath>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 5);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &ids));
TF_LITE_ENSURE_EQ(context, NumDimensions(ids), 1);
TF_LITE_ENSURE_EQ(context, ids->type, kTfLiteInt32);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &indices));
TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 2);
TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32);
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &shape));
TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1);
TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32);
const TfLiteTensor* weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 3, &weights));
TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 1);
TF_LITE_ENSURE_EQ(context, weights->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
SizeOfDimension(ids, 0));
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
SizeOfDimension(weights, 0));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 4, &value));
TF_LITE_ENSURE(context, NumDimensions(value) >= 2);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
output->allocation_type = kTfLiteDynamic;
return kTfLiteOk;
}
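// Rescales one finished output row according to the combiner: SUM leaves the
// accumulated weighted sum as is, MEAN divides by the total weight, and
// SQRTN divides by the L2 norm of the weights. For example, with weights
// {1.0, 2.0}, SQRTN divides the row by sqrt(1*1 + 2*2) = sqrt(5).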
void FinalizeAggregation(TfLiteCombinerType combiner, int num_elements,
float current_total_weight,
float current_squares_weight, int embedding_size,
float* output) {
if (combiner != kTfLiteCombinerTypeSum && num_elements > 0) {
float multiplier = 1.0;
switch (combiner) {
case kTfLiteCombinerTypeMean:
multiplier = current_total_weight;
break;
case kTfLiteCombinerTypeSqrtn:
multiplier = std::sqrt(current_squares_weight);
break;
default:
break;
}
for (int k = 0; k < embedding_size; k++) {
output[k] /= multiplier;
}
}
}
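// The output shape concatenates the dense lookup shape without its last
// dimension and the embedding value shape without its first dimension.
// Entries are assumed to arrive grouped by output row: a change of output
// offset signals that the previous row's aggregation can be finalized.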
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteEmbeddingLookupSparseParams*>(node->builtin_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &ids));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &indices));
const TfLiteTensor* dense_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &dense_shape));
const TfLiteTensor* weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 3, &weights));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 4, &value));
const size_t values_size = NumElements(value);
const int lookup_rank = SizeOfDimension(indices, 1);
const int embedding_rank = NumDimensions(value);
const int num_lookups = SizeOfDimension(ids, 0);
const int num_rows = SizeOfDimension(value, 0);
const int output_rank = (lookup_rank - 1) + (embedding_rank - 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(dense_shape, 0), lookup_rank);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
TF_LITE_ENSURE(context, output_shape != nullptr);
int k = 0;
size_t embedding_size = 1;
size_t lookup_size = 1;
for (int i = 0; i < lookup_rank - 1; i++, k++) {
const size_t dim = dense_shape->data.i32[i];
TF_LITE_ENSURE_MSG(
context,
MultiplyAndCheckOverflow(lookup_size, dim, &lookup_size) == kTfLiteOk,
"Lookup size overflowed.");
output_shape->data[k] = dim;
}
for (int i = 1; i < embedding_rank; i++, k++) {
const size_t dim = SizeOfDimension(value, i);
TF_LITE_ENSURE_MSG(context,
MultiplyAndCheckOverflow(embedding_size, dim,
&embedding_size) == kTfLiteOk,
"Embedding size overflowed.");
output_shape->data[k] = dim;
}
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
const size_t output_size = lookup_size * embedding_size;
TfLiteTensorRealloc(output_size * sizeof(float), output);
float* output_ptr = GetTensorData<float>(output);
const float* weights_ptr = GetTensorData<float>(weights);
const float* value_ptr = GetTensorData<float>(value);
TF_LITE_ENSURE(context, output_ptr != nullptr);
std::fill_n(output_ptr, output_size, 0.0f);
int current_output_offset = 0;
float current_total_weight = 0.0;
float current_squares_weight = 0.0;
int num_elements = 0;
for (int i = 0; i < num_lookups; i++) {
int idx = ids->data.i32[i];
if (idx >= num_rows || idx < 0) {
TF_LITE_KERNEL_LOG(context,
"Embedding Lookup Sparse: index out of bounds. "
"Got %d, and bounds are [0, %d]",
idx, num_rows - 1);
return kTfLiteError;
}
const int example_indices_offset = i * lookup_rank;
int output_bucket = 0;
int stride = 1;
for (int k = (lookup_rank - 1) - 1; k >= 0; k--) {
output_bucket += indices->data.i32[example_indices_offset + k] * stride;
stride *= dense_shape->data.i32[k];
}
const int output_offset = output_bucket * embedding_size;
if (output_offset != current_output_offset) {
FinalizeAggregation(params->combiner, num_elements, current_total_weight,
current_squares_weight, embedding_size,
&output_ptr[current_output_offset]);
num_elements = 0;
current_total_weight = 0.0;
current_squares_weight = 0.0;
current_output_offset = output_offset;
}
++num_elements;
const int example_embedding_offset = idx * embedding_size;
const float w = weights_ptr[i];
current_squares_weight += w * w;
current_total_weight += w;
for (int k = 0; k < embedding_size; k++) {
if (current_output_offset + k < 0) continue;
if (current_output_offset + k >= output_size) continue;
if (example_embedding_offset + k < 0) continue;
if (example_embedding_offset + k >= values_size) continue;
output_ptr[current_output_offset + k] +=
value_ptr[example_embedding_offset + k] * w;
}
}
FinalizeAggregation(params->combiner, num_elements, current_total_weight,
current_squares_weight, embedding_size,
&GetTensorData<float>(output)[current_output_offset]);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EMBEDDING_LOOKUP_SPARSE() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <cmath>
#include <functional>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class EmbeddingLookupSparseOpModel : public SingleOpModel {
public:
EmbeddingLookupSparseOpModel(CombinerType type,
std::initializer_list<int> lookup_shape,
std::initializer_list<int> indices_shape,
std::initializer_list<int> dense_shape_shape,
std::initializer_list<int> value_shape) {
lookup_ = AddInput(TensorType_INT32);
indices_ = AddInput(TensorType_INT32);
dense_shape_ = AddInput(TensorType_INT32);
weights_ = AddInput(TensorType_FLOAT32);
value_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
BuiltinOptions_EmbeddingLookupSparseOptions,
CreateEmbeddingLookupSparseOptions(builder_, type).Union());
BuildInterpreter({lookup_shape, indices_shape, dense_shape_shape,
lookup_shape, value_shape});
}
void SetInput(std::initializer_list<int> lookup_data,
std::initializer_list<int> indices_data,
std::initializer_list<int> dense_shape_data,
std::initializer_list<float> weights_data) {
PopulateTensor(lookup_, lookup_data);
PopulateTensor(indices_, indices_data);
PopulateTensor(dense_shape_, dense_shape_data);
PopulateTensor(weights_, weights_data);
}
void Set3DWeightMatrix(const std::function<float(int, int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
int columns = tensor->dims->data[1];
int features = tensor->dims->data[2];
float* tensor_ptr = GetTensorData<float>(tensor);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < columns; j++) {
for (int k = 0; k < features; k++) {
tensor_ptr[(i * columns + j) * features + k] = function(i, j, k);
}
}
}
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int lookup_;
int weights_;
int indices_;
int dense_shape_;
int value_;
int output_;
};
TEST(EmbeddingLookupSparseOpTest, SimpleTest) {
EmbeddingLookupSparseOpModel m(CombinerType_SUM, {3}, {3, 2}, {2}, {4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 2, 0, 2, 1}, {3, 2}, {1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
6.00, 6.06, 6.60, 6.66, 7.20, 7.26,
})));
}
TEST(EmbeddingLookupSparseOpTest, SimpleTestMean) {
EmbeddingLookupSparseOpModel m(CombinerType_MEAN, {3}, {3, 2}, {2},
{4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 2, 0, 2, 1}, {3, 2}, {1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
})));
}
TEST(EmbeddingLookupSparseOpTest, SimpleTestSqrtn) {
EmbeddingLookupSparseOpModel m(CombinerType_SQRTN, {3}, {3, 2}, {2},
{4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 2, 0, 2, 1}, {3, 2}, {1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
6.00f / std::sqrt(20.0f), 6.06f / std::sqrt(20.0f),
6.60f / std::sqrt(20.0f), 6.66f / std::sqrt(20.0f),
7.20f / std::sqrt(20.0f),
7.26f / std::sqrt(20.0f),
})));
}
TEST(EmbeddingLookupSparseOpTest, Indices3DTest) {
EmbeddingLookupSparseOpModel m(CombinerType_SUM, {3}, {3, 3}, {3}, {4, 3, 2});
m.SetInput({1, 3, 0}, {0, 0, 0, 2, 0, 0, 2, 0, 1}, {3, 2, 2},
{1.0, 2.0, 4.0});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.10, 1.11, 1.20, 1.21, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 6.00, 6.06, 6.60,
6.66, 7.20, 7.26, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup_sparse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup_sparse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ebba9ecb-8621-453a-873b-c3414efb080a | cpp | google/arolla | input_loader | arolla/io/input_loader.cc | arolla/io/input_loader_test.cc | #include "arolla/io/input_loader.h"
#include <algorithm>
#include <cstddef>
#include <set>
#include <string>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/string.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
absl::Status ValidateDuplicatedNames(OutputTypesSpan output_types) {
absl::flat_hash_map<std::string, size_t> names_count;
std::vector<std::string> duplicated_names;
for (const auto& [name, type] : output_types) {
size_t& count = names_count[name];
if (count == 1) {
duplicated_names.push_back(name);
}
++count;
}
if (duplicated_names.empty()) {
return absl::OkStatus();
}
std::sort(duplicated_names.begin(), duplicated_names.end());
return absl::FailedPreconditionError(
absl::StrCat("accessors have duplicated names: ",
absl::StrJoin(duplicated_names, ", ")));
}
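// Resolves the QType of every requested input name, reporting all unknown
// names in a single error (name lists are truncated to 200 characters).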
absl::StatusOr<absl::flat_hash_map<std::string, QTypePtr>> GetInputLoaderQTypes(
const InputLoaderBase& input_loader, absl::Span<const std::string> names) {
absl::flat_hash_map<std::string, QTypePtr> types;
types.reserve(names.size());
std::set<absl::string_view> unknown_types;
for (const auto& name : names) {
if (auto qtype = input_loader.GetQTypeOf(name); qtype != nullptr) {
types.emplace(name, qtype);
} else {
unknown_types.emplace(name);
}
}
if (!unknown_types.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"unknown inputs: %s (available: %s)",
Truncate(absl::StrJoin(unknown_types, ", "), 200),
Truncate(absl::StrJoin(input_loader.SuggestAvailableNames(), ", "),
200)));
}
return types;
}
absl::Status InputLoaderBase::ValidateSlotTypes(
const absl::flat_hash_map<std::string, TypedSlot>& slots) const {
std::vector<std::string> names;
names.reserve(slots.size());
for (const auto& [name, _] : slots) {
names.emplace_back(name);
}
ASSIGN_OR_RETURN(auto types, GetInputLoaderQTypes(*this, names));
return VerifySlotTypes(types, slots,
true,
false);
}
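// Splits slot ownership for chained loaders: slots this loader can populate
// are moved out of `slots` and returned; the rest stay behind for the next
// loader in the chain.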
absl::flat_hash_map<std::string, TypedSlot>
InputLoaderBase::ExtractSupportedSlots(
absl::Nonnull<absl::flat_hash_map<std::string, TypedSlot>*> slots) const {
absl::flat_hash_map<std::string, TypedSlot> partial_slots;
for (const auto& [name, slot] : *slots) {
if (GetQTypeOf(name) == nullptr) {
continue;
}
partial_slots.emplace(name, slot);
}
for (const auto& [name, _] : partial_slots) {
slots->erase(name);
}
return partial_slots;
}
} | #include "arolla/io/input_loader.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/types/span.h"
#include "arolla/io/accessors_input_loader.h"
#include "arolla/io/testing/matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::InputLoaderSupports;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
struct TestStruct {
int a;
double b;
};
TEST(InputLoaderTest, GetInputLoaderTypes) {
ASSERT_OK_AND_ASSIGN(auto loader,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; },
"b", [](const TestStruct& s) { return s.b; }));
EXPECT_THAT(GetInputLoaderQTypes(*loader, {}), IsOkAndHolds(IsEmpty()));
EXPECT_THAT(
GetInputLoaderQTypes(*loader, {"a"}),
IsOkAndHolds(UnorderedElementsAre(Pair("a", GetQType<int32_t>()))));
EXPECT_THAT(
GetInputLoaderQTypes(*loader, {"a", "b"}),
IsOkAndHolds(UnorderedElementsAre(Pair("a", GetQType<int32_t>()),
Pair("b", GetQType<double>()))));
EXPECT_THAT(GetInputLoaderQTypes(*loader, {"a", "b", "c"}),
StatusIs(absl::StatusCode::kInvalidArgument,
"unknown inputs: c (available: a, b)"));
}
TEST(InputLoaderTest, ChainInputLoaderConflict) {
ASSERT_OK_AND_ASSIGN(auto loader1,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; },
"b", [](const TestStruct& s) { return s.b; }));
ASSERT_OK_AND_ASSIGN(auto loader2,
CreateAccessorsInputLoader<TestStruct>(
"b", [](const TestStruct& s) { return 2 * s.b; },
"c", [](const TestStruct& s) { return s.b * s.b; }));
ASSERT_OK_AND_ASSIGN(auto chain_loader,
ChainInputLoader<TestStruct>::Build(std::move(loader1),
std::move(loader2)));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(
BoundInputLoader<TestStruct> bound_input_loader,
chain_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)}}));
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
}
TEST(InputLoaderTest, MakeNotOwningInputLoader) {
ASSERT_OK_AND_ASSIGN(std::unique_ptr<InputLoader<TestStruct>> wrapped_loader,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; }));
std::unique_ptr<InputLoader<TestStruct>> not_owning_loader =
MakeNotOwningInputLoader(wrapped_loader.get());
EXPECT_THAT(not_owning_loader->GetQTypeOf("a"), Eq(GetQType<int32_t>()));
EXPECT_THAT(not_owning_loader->GetQTypeOf("b"), IsNull());
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
FrameLayout memory_layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(
BoundInputLoader<TestStruct> bound_input_loader,
not_owning_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)}}));
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
TEST(InputLoaderTest, MakeSharedOwningInputLoader) {
std::unique_ptr<InputLoader<TestStruct>> shared_owning_loader;
{
ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const InputLoader<TestStruct>> wrapped_loader,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; }));
shared_owning_loader = MakeSharedOwningInputLoader(wrapped_loader);
}
EXPECT_THAT(shared_owning_loader->GetQTypeOf("a"), Eq(GetQType<int32_t>()));
EXPECT_THAT(shared_owning_loader->GetQTypeOf("b"), IsNull());
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
FrameLayout memory_layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(
BoundInputLoader<TestStruct> bound_input_loader,
shared_owning_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)}}));
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
TEST(InputLoaderTest, BindInputLoaderList) {
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
auto c_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; }));
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"b", [](const TestStruct& s) { return s.b; }));
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"b", [](const TestStruct& s) { return int{0}; },
"c", [](const TestStruct& s) { return s.b * s.b; }));
ASSERT_OK_AND_ASSIGN(
std::vector<BoundInputLoader<TestStruct>> bound_input_loaders,
BindInputLoaderList<TestStruct>(input_loaders,
{
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"c", TypedSlot::FromSlot(c_slot)},
}));
MemoryAllocation alloc(&memory_layout);
TestStruct input{5, 3.5};
for (const auto& bound_input_loader : bound_input_loaders) {
ASSERT_OK(bound_input_loader(input, alloc.frame()));
}
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
TEST(InputLoaderTest, BindInputLoaderListErrors) {
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
auto c_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; }));
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"b", [](const TestStruct& s) { return s.b; }));
EXPECT_THAT(
BindInputLoaderList<TestStruct>(input_loaders,
{
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"c", TypedSlot::FromSlot(c_slot)},
}),
StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("not all")));
}
TEST(InputLoaderTest, FilteringInputLoader) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
ASSERT_OK_AND_ASSIGN(auto inner_loader,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; },
"b", [](const TestStruct& s) { return s.b; }));
EXPECT_THAT(inner_loader->GetQTypeOf("a"), Eq(i32));
EXPECT_THAT(inner_loader->GetQTypeOf("b"), Eq(f64));
auto filtered_loader =
MakeFilteringInputLoader(std::move(inner_loader), {"a"});
EXPECT_THAT(filtered_loader->GetQTypeOf("a"), Eq(i32));
EXPECT_THAT(filtered_loader->GetQTypeOf("b"), IsNull());
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
EXPECT_THAT(filtered_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"unknown inputs: b (available: a)"));
ASSERT_OK_AND_ASSIGN(
BoundInputLoader<TestStruct> bound_input_loader,
filtered_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)}}));
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
TEST(InputLoaderTest, ChainInputLoader) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
std::unique_ptr<InputLoader<TestStruct>> chain_input_loader;
{
ASSERT_OK_AND_ASSIGN(auto loader1,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; }));
ASSERT_OK_AND_ASSIGN(auto loader2,
CreateAccessorsInputLoader<TestStruct>(
"b", [](const TestStruct& s) { return s.b; }));
ASSERT_OK_AND_ASSIGN(
auto loader3, CreateAccessorsInputLoader<TestStruct>(
"c", [](const TestStruct& s) { return s.b * s.b; }));
ASSERT_OK_AND_ASSIGN(
chain_input_loader,
ChainInputLoader<TestStruct>::Build(
std::move(loader1), std::move(loader2), std::move(loader3)));
}
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
auto c_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
EXPECT_THAT(*chain_input_loader,
InputLoaderSupports({{"a", i32}, {"b", f64}, {"c", f64}}));
ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
chain_input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"c", TypedSlot::FromSlot(c_slot)},
}));
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
TEST(InputLoaderTest, ChainInputLoaderFactoryPropagated) {
auto qbool = GetQType<bool>();
std::unique_ptr<InputLoader<TestStruct>> input_loader;
UnsafeArenaBufferFactory global_factory1(1000);
UnsafeArenaBufferFactory global_factory2(1000);
{
ASSERT_OK_AND_ASSIGN(auto loader1, CreateAccessorsInputLoader<TestStruct>(
"a", [&](const TestStruct&,
RawBufferFactory* factory) {
return factory == &global_factory1;
}));
ASSERT_OK_AND_ASSIGN(auto loader2, CreateAccessorsInputLoader<TestStruct>(
"b", [&](const TestStruct&,
RawBufferFactory* factory) {
return factory == &global_factory2;
}));
ASSERT_OK_AND_ASSIGN(
input_loader, ChainInputLoader<TestStruct>::Build(std::move(loader1),
std::move(loader2)));
}
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<bool>();
auto b_slot = layout_builder.AddSlot<bool>();
FrameLayout memory_layout = std::move(layout_builder).Build();
EXPECT_THAT(input_loader, InputLoaderSupports({{"a", qbool}, {"b", qbool}}));
ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
}));
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory1));
EXPECT_TRUE(alloc.frame().Get(a_slot));
EXPECT_FALSE(alloc.frame().Get(b_slot));
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory2));
EXPECT_FALSE(alloc.frame().Get(a_slot));
EXPECT_TRUE(alloc.frame().Get(b_slot));
}
TEST(InputLoaderTest, ChainInputLoaderWithCustomInvoke) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
std::unique_ptr<InputLoader<TestStruct>> chain_input_loader;
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
auto c_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
int64_t number_of_loaders = -1;
{
std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; }));
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"b", [](const TestStruct& s) { return s.b; }));
ASSERT_OK_AND_ASSIGN(
input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"c", [](const TestStruct& s) { return s.b * s.b; }));
ASSERT_OK_AND_ASSIGN(
chain_input_loader,
ChainInputLoader<TestStruct>::Build(
std::move(input_loaders),
[&number_of_loaders](
absl::Span<const BoundInputLoader<TestStruct>> loaders,
const TestStruct& input, FramePtr frame,
RawBufferFactory* factory) {
number_of_loaders = loaders.size();
return ChainInputLoader<TestStruct>::InvokeBoundLoaders(
loaders, input, frame, factory);
}));
EXPECT_THAT(*chain_input_loader,
InputLoaderSupports({{"a", i32}, {"b", f64}, {"c", f64}}));
}
BoundInputLoader<TestStruct> bound_input_loader(nullptr);
{
ASSERT_OK_AND_ASSIGN(bound_input_loader,
chain_input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"c", TypedSlot::FromSlot(c_slot)},
}));
}
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(number_of_loaders, 3);
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
TEST(InputLoaderTest, ChainInputLoaderWithCustomInvokeOptimized) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
std::unique_ptr<InputLoader<TestStruct>> chain_input_loader;
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
FrameLayout memory_layout = std::move(layout_builder).Build();
int64_t number_of_loaders = -1;
{
std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; }));
ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
CreateAccessorsInputLoader<TestStruct>(
"b", [](const TestStruct& s) { return s.b; }));
ASSERT_OK_AND_ASSIGN(
chain_input_loader,
ChainInputLoader<TestStruct>::Build(
std::move(input_loaders),
[&number_of_loaders](
absl::Span<const BoundInputLoader<TestStruct>> loaders,
const TestStruct& input, FramePtr frame,
RawBufferFactory* factory) {
number_of_loaders = loaders.size();
return ChainInputLoader<TestStruct>::InvokeBoundLoaders(
loaders, input, frame, factory);
}));
EXPECT_THAT(*chain_input_loader,
InputLoaderSupports({{"a", i32}, {"b", f64}}));
}
BoundInputLoader<TestStruct> bound_input_loader(nullptr);
{
ASSERT_OK_AND_ASSIGN(bound_input_loader,
chain_input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
}));
}
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(number_of_loaders, -1);
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/input_loader.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/input_loader_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
deff4997-313c-46ca-95fa-0d88650e66ce | cpp | google/cel-cpp | validation_result | checker/validation_result.h | checker/validation_result_test.cc | #ifndef THIRD_PARTY_CEL_CPP_CHECKER_VALIDATION_RESULT_H_
#define THIRD_PARTY_CEL_CPP_CHECKER_VALIDATION_RESULT_H_
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "checker/type_check_issue.h"
#include "common/ast.h"
namespace cel {
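// Outcome of type-checking an expression: on success the checked AST is owned
// by this object, on failure the AST is null and GetIssues() explains what
// went wrong.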
class ValidationResult {
public:
ValidationResult(std::unique_ptr<Ast> ast, std::vector<TypeCheckIssue> issues)
: ast_(std::move(ast)), issues_(std::move(issues)) {}
explicit ValidationResult(std::vector<TypeCheckIssue> issues)
: ast_(nullptr), issues_(std::move(issues)) {}
bool IsValid() const { return ast_ != nullptr; }
absl::Nullable<const Ast*> GetAst() const { return ast_.get(); }
absl::StatusOr<std::unique_ptr<Ast>> ReleaseAst() {
if (ast_ == nullptr) {
return absl::FailedPreconditionError(
"ValidationResult is empty. Check for TypeCheckIssues.");
}
return std::move(ast_);
}
absl::Span<const TypeCheckIssue> GetIssues() const { return issues_; }
private:
absl::Nullable<std::unique_ptr<Ast>> ast_;
std::vector<TypeCheckIssue> issues_;
};
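// Illustrative usage (not part of the original header; `checker` is a
// hypothetical type checker producing a ValidationResult):
//   ValidationResult result = checker.Check(expr);
//   if (result.IsValid()) {
//     absl::StatusOr<std::unique_ptr<Ast>> ast = result.ReleaseAst();
//   }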
}
#endif | #include "checker/validation_result.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "base/ast_internal/ast_impl.h"
#include "checker/type_check_issue.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::ast_internal::AstImpl;
using ::testing::_;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SizeIs;
using Severity = TypeCheckIssue::Severity;
TEST(ValidationResultTest, IsValidWithAst) {
ValidationResult result(std::make_unique<AstImpl>(), {});
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetAst(), NotNull());
EXPECT_THAT(result.ReleaseAst(), IsOkAndHolds(NotNull()));
}
TEST(ValidationResultTest, IsNotValidWithoutAst) {
ValidationResult result({});
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(result.GetAst(), IsNull());
EXPECT_THAT(result.ReleaseAst(),
StatusIs(absl::StatusCode::kFailedPrecondition, _));
}
TEST(ValidationResultTest, GetIssues) {
ValidationResult result(
{TypeCheckIssue::CreateError({-1, -1}, "Issue1"),
TypeCheckIssue(Severity::kInformation, {-1, -1}, "Issue2")});
EXPECT_FALSE(result.IsValid());
ASSERT_THAT(result.GetIssues(), SizeIs(2));
EXPECT_THAT(result.GetIssues()[0].message(), "Issue1");
EXPECT_THAT(result.GetIssues()[0].severity(), Severity::kError);
EXPECT_THAT(result.GetIssues()[1].message(), "Issue2");
EXPECT_THAT(result.GetIssues()[1].severity(), Severity::kInformation);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/validation_result.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/validation_result_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
8f4efb58-88af-470d-a5f6-790410917c2f | cpp | tensorflow/tensorflow | buf_rendezvous | tensorflow/core/common_runtime/buf_rendezvous.cc | tensorflow/core/common_runtime/buf_rendezvous_test.cc | #include "tensorflow/core/common_runtime/buf_rendezvous.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
namespace tensorflow {
namespace {
void DeregisterCancellation(BufRendezvous::Hook* h) {
if (h->cancellation_manager != nullptr) {
h->cancellation_manager->DeregisterCallback(h->cancellation_token);
h->cancellation_manager = nullptr;
h->cancellation_token = CancellationManager::kInvalidToken;
}
}
}
BufRendezvous::~BufRendezvous() {
mutex_lock l(mu_);
if (!hook_table_.empty()) {
PurgeTable(errors::Internal("Delete called on non-empty BufRendezvous"),
&hook_table_);
}
}
void BufRendezvous::StartAbort(const Status& s) {
CHECK(!s.ok());
HookTable dummy_table;
{
mutex_lock l(mu_);
status_.Update(StatusGroup::MakeDerived(s));
hook_table_.swap(dummy_table);
}
PurgeTable(s, &dummy_table);
}
void BufRendezvous::PurgeTable(const Status& s, HookTable* table) {
for (auto& it : *table) {
Hook* h = it.second;
if (h->cancellation_manager != nullptr) {
h->cancellation_manager->TryDeregisterCallback(h->cancellation_token);
}
if (h->cons_cb != nullptr) {
h->cons_cb(s, nullptr);
}
if (h->prod_cb != nullptr) {
h->prod_cb(s);
}
delete h;
}
table->clear();
}
string BufRendezvous::Hook::DebugString() const {
return absl::StrCat(
"[dev:", (prod_dev ? prod_dev->name() : "none"),
", ctx:", reinterpret_cast<uint64>(prod_ctx),
", val:", reinterpret_cast<uint64>(prod_value),
", pcb:", prod_cb ? reinterpret_cast<uint64>(&prod_cb) : 0,
", ccb:", cons_cb ? reinterpret_cast<uint64>(&cons_cb) : 0, "]");
}
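// Producer side of the rendezvous: records the tensor under `key` and either
// fulfills an already-waiting consumer hook immediately (outside the lock) or
// parks the hook in hook_table_, registering a cancellation callback, until a
// consumer arrives.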
void BufRendezvous::ProvideBuf(const string& key, Device* dev,
DeviceContext* dev_ctx, const Tensor* v,
const AllocatorAttributes& attr,
const ProducerCallback& done,
CancellationManager* cancellation_manager) {
DVLOG(4) << "ProvideBuf: key = " << key;
#ifndef NDEBUG
if (VLOG_IS_ON(4)) {
LogContents();
}
#endif
Hook* h = nullptr;
Status providebuf_status;
do {
mutex_lock l(mu_);
if (!status_.ok()) {
providebuf_status = status_;
break;
} else {
CancellationToken cancellation_token = CancellationManager::kInvalidToken;
auto it = hook_table_.find(key);
if (it == hook_table_.end()) {
if (cancellation_manager != nullptr) {
cancellation_token = cancellation_manager->get_cancellation_token();
}
h = new Hook(cancellation_manager, cancellation_token);
it = hook_table_.insert(std::make_pair(key, h)).first;
} else {
if (it->second->prod_cb != nullptr) {
providebuf_status = errors::Internal(
"BufRendezvous::ProvideBuf already called for key ", key);
break;
}
h = it->second;
}
h->prod_dev = dev;
h->prod_ctx = dev_ctx;
h->prod_value = v;
h->prod_attr = attr;
h->prod_cb = done;
if (h->cons_cb != nullptr) {
hook_table_.erase(it);
} else {
if (cancellation_manager != nullptr &&
!cancellation_manager->RegisterCallback(
cancellation_token, [this, key]() { CancelHook(key); })) {
providebuf_status = errors::Cancelled(
"Operation was cancelled for BufRendezvous key ", key);
hook_table_.erase(it);
delete h;
}
h = nullptr;
}
}
} while (false);
if (h) {
DVLOG(4) << "ProvideBuf: key = " << key << ": calling cons_cb"
<< h->DebugString();
DeregisterCancellation(h);
h->cons_cb(absl::OkStatus(), h);
}
if (!providebuf_status.ok()) {
done(providebuf_status);
}
}
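// Consumer side: first checks that the named device still has the expected
// incarnation (a mismatch means the worker holding it restarted), then either
// takes over a parked producer hook or leaves a consumer hook waiting.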
void BufRendezvous::ConsumeBuf(const string& key, const string& device_name,
const uint64 device_incarnation,
const ConsumerCallback& done,
CancellationManager* cancellation_manager) {
DVLOG(4) << "ConsumeBuf: key = " << key << " device_name = " << device_name;
#ifndef NDEBUG
if (VLOG_IS_ON(4)) {
LogContents();
}
#endif
Device* device;
Status consumebuf_status = dev_mgr_->LookupDevice(device_name, &device);
if (consumebuf_status.ok() &&
device->attributes().incarnation() != device_incarnation) {
consumebuf_status = errors::FailedPrecondition(
"RecvBuf expects a different device incarnation: ", device_incarnation,
" vs. ", device->attributes().incarnation(),
". Your worker job that contains the device (\"", device_name,
"\") was probably restarted. Check your "
"worker job for the reason why it was restarted.");
}
if (!consumebuf_status.ok()) {
done(consumebuf_status, nullptr);
return;
}
Hook* existing_hook = nullptr;
do {
mutex_lock l(mu_);
if (!status_.ok()) {
consumebuf_status = status_;
break;
}
auto it = hook_table_.find(key);
if (it != hook_table_.end()) {
if (it->second->cons_cb) {
consumebuf_status =
errors::Internal("Second consumer arrived for key ", key);
break;
}
existing_hook = it->second;
hook_table_.erase(it);
existing_hook->cons_cb = done;
} else {
CancellationToken cancellation_token = CancellationManager::kInvalidToken;
bool already_cancelled = false;
if (cancellation_manager != nullptr) {
cancellation_token = cancellation_manager->get_cancellation_token();
already_cancelled = !cancellation_manager->RegisterCallback(
cancellation_token, [this, key]() { CancelHook(key); });
}
if (already_cancelled) {
consumebuf_status = errors::Cancelled(
"Operation was cancelled for BufRendezvous key ", key);
} else {
Hook* h = new Hook(cancellation_manager, cancellation_token);
h->cons_cb = done;
it = hook_table_.insert(std::make_pair(key, h)).first;
return;
}
}
} while (false);
if (existing_hook) {
DVLOG(4) << "ConsumeBuf: key = " << key << ": calling cons_cb"
<< existing_hook->DebugString();
DeregisterCancellation(existing_hook);
existing_hook->cons_cb(absl::OkStatus(), existing_hook);
return;
}
if (!consumebuf_status.ok()) {
done(consumebuf_status, nullptr);
return;
}
}
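// Cancellation callback target: removes the hook for `key`, if still pending,
// and fails whichever side(s) already registered with a Cancelled status.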
void BufRendezvous::CancelHook(const string& key) {
Hook* h = nullptr;
{
mutex_lock l(mu_);
auto it = hook_table_.find(key);
if (it == hook_table_.end()) return;
h = it->second;
hook_table_.erase(it);
}
if (h != nullptr) {
auto s = errors::Cancelled("Operation was cancelled for BufRendezvous key ",
key);
if (h->prod_cb != nullptr) {
h->prod_cb(s);
}
if (h->cons_cb != nullptr) {
h->cons_cb(s, nullptr);
}
delete h;
}
}
void BufRendezvous::DoneWithHook(Hook* h) {
h->prod_cb(absl::OkStatus());
delete h;
}
void BufRendezvous::LogContents() {
mutex_lock l(mu_);
LOG(INFO) << strings::StrCat("BufRendezvous ",
strings::Hex(reinterpret_cast<uint64>(this)),
" step_id=", step_id_, " current contents:");
for (const auto& it : hook_table_) {
LOG(INFO) << it.first << ":" << it.second->DebugString();
}
}
} | #include "tensorflow/core/common_runtime/buf_rendezvous.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class BufRendezvousTest : public ::testing::Test {
protected:
static std::unique_ptr<Device> NewDevice(const string& name,
const string& type,
const uint64 incarnation) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attrs)
: Device(nullptr, attrs) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attrs;
attrs.set_name(name);
attrs.set_device_type(type);
attrs.set_incarnation(incarnation);
return std::make_unique<FakeDevice>(attrs);
}
void InitializeDevice(const string& device, const string& type,
const uint64 incarnation) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(NewDevice(device, type, incarnation));
dev_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
br_ = std::make_unique<BufRendezvous>(123, dev_mgr_.get());
}
BufRendezvousTest()
: a_(Tensor(DT_FLOAT, TensorShape({24}))),
b_(Tensor(DT_FLOAT, TensorShape({24}))),
fake_device_context_(reinterpret_cast<DeviceContext*>(1024LLU)) {
InitializeDevice(*kDefaultDeviceName, "CPU", kDefaultIncarnation);
TF_CHECK_OK(dev_mgr_->LookupDevice(*kDefaultDeviceName, &default_device_));
}
Tensor a_;
Tensor b_;
AllocatorAttributes aa_;
Device* default_device_;
DeviceContext* fake_device_context_;
std::unique_ptr<DeviceMgr> dev_mgr_;
std::unique_ptr<BufRendezvous> br_;
CancellationManager cm_;
static const string* const kDefaultKey;
static const string* const kDefaultDeviceName;
static const uint64 kDefaultIncarnation;
};
const string* const BufRendezvousTest::kDefaultKey = new string("key0");
const string* const BufRendezvousTest::kDefaultDeviceName =
new string("/device:CPU:0");
const uint64 BufRendezvousTest::kDefaultIncarnation = 12345;
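// Happy path with the producer arriving first: the consumer callback must see
// the provided device, context, and tensor, and both callbacks finish OK.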
TEST_F(BufRendezvousTest, CorrectUseProducerFirst) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_FALSE(prod_callback_called);
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
EXPECT_TRUE(cons_callback_called);
note.WaitForNotification();
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, CorrectUseConsumerFirst) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
EXPECT_FALSE(cons_callback_called);
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_TRUE(cons_callback_called);
note.WaitForNotification();
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, ErrorDuplicatePut) {
bool prod_callback_called = false;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&prod_callback_called](const Status& s) { prod_callback_called = true; },
&cm_);
Status bad_status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&bad_status, ¬e](const Status& s) {
bad_status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_FALSE(bad_status.ok());
EXPECT_EQ(absl::StrCat("BufRendezvous::ProvideBuf already called for key ",
*kDefaultKey),
bad_status.message());
EXPECT_FALSE(prod_callback_called);
br_.reset();
}
TEST_F(BufRendezvousTest, ErrorDeleteNonEmpty) {
Status cons_status;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
EXPECT_EQ(h, nullptr);
},
&cm_);
EXPECT_TRUE(cons_status.ok());
br_.reset();
EXPECT_FALSE(cons_status.ok());
EXPECT_EQ("Delete called on non-empty BufRendezvous", cons_status.message());
}
TEST_F(BufRendezvousTest, AbortNonEmpty) {
Status cons_status;
Status prod_status;
Notification prod_note;
Notification cons_note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
cons_note.Notify();
},
&cm_);
br_->ProvideBuf(
"key1", default_device_, fake_device_context_, &a_, aa_,
[&prod_note, &prod_status](const Status& s) {
prod_status = s;
prod_note.Notify();
},
&cm_);
br_->StartAbort(errors::Internal("Falling sky detected"));
prod_note.WaitForNotification();
cons_note.WaitForNotification();
EXPECT_FALSE(prod_status.ok());
EXPECT_EQ(prod_status.message(), "Falling sky detected");
EXPECT_FALSE(cons_status.ok());
EXPECT_EQ(cons_status.message(), "Falling sky detected");
}
TEST_F(BufRendezvousTest, AbortEmpty) {
br_->StartAbort(errors::Internal("Falling sky detected"));
}
TEST_F(BufRendezvousTest, UseAfterAbort) {
br_->StartAbort(errors::Internal("Falling sky detected"));
Status cons_status;
Status prod_status;
Notification prod_note;
Notification cons_note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
cons_note.Notify();
},
&cm_);
br_->ProvideBuf(
"key1", default_device_, fake_device_context_, &a_, aa_,
[&prod_note, &prod_status](const Status& s) {
prod_status = s;
prod_note.Notify();
},
&cm_);
prod_note.WaitForNotification();
cons_note.WaitForNotification();
EXPECT_FALSE(prod_status.ok());
EXPECT_NE(prod_status.message().find("Falling sky detected"), string::npos);
EXPECT_FALSE(cons_status.ok());
EXPECT_NE(cons_status.message().find("Falling sky detected"), string::npos);
}
TEST_F(BufRendezvousTest, DeviceIncarnationMismatch) {
Status cons_status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[](const Status&) {}, nullptr);
const uint64 incorrect_incarnation = 23456;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, incorrect_incarnation,
[¬e, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
note.Notify();
},
nullptr);
note.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(cons_status));
}
TEST_F(BufRendezvousTest, ProvideThenCancel) {
Status status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&status, ¬e](const Status& s) {
status = s;
note.Notify();
},
&cm_);
cm_.StartCancel();
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, CancelThenProvide) {
Status status;
Notification note;
cm_.StartCancel();
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&status, ¬e](const Status& s) {
status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, ConsumeThenCancel) {
Status status;
Notification note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&status, ¬e](const Status& s, BufRendezvous::Hook* h) {
status = s;
note.Notify();
},
&cm_);
cm_.StartCancel();
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, CancelThenConsume) {
Status status;
Notification note;
cm_.StartCancel();
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&status, ¬e](const Status& s, BufRendezvous::Hook* h) {
status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, ProvideConsumeThenCancel) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_FALSE(prod_callback_called);
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
note.WaitForNotification();
cm_.StartCancel();
EXPECT_TRUE(cons_callback_called);
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, CancelThenProvideConsume) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
cm_.StartCancel();
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
EXPECT_TRUE(errors::IsCancelled(prod_status));
prod_callback_called = true;
},
&cm_);
EXPECT_TRUE(prod_callback_called);
EXPECT_TRUE(errors::IsCancelled(prod_status));
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
EXPECT_TRUE(errors::IsCancelled(cons_status));
cons_callback_called = true;
},
&cm_);
EXPECT_TRUE(cons_callback_called);
EXPECT_TRUE(errors::IsCancelled(cons_status));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/buf_rendezvous.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/buf_rendezvous_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2478c4a4-5745-42fe-abaf-6b65cb3276c5 | cpp | google/cel-cpp | list_type | common/types/list_type.cc | common/types/list_type_test.cc | #include <string>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "common/type.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
namespace cel {
namespace common_internal {
namespace {
ABSL_CONST_INIT const ListTypeData kDynListTypeData;
}
absl::Nonnull<ListTypeData*> ListTypeData::Create(
absl::Nonnull<google::protobuf::Arena*> arena, const Type& element) {
return ::new (arena->AllocateAligned(
sizeof(ListTypeData), alignof(ListTypeData))) ListTypeData(element);
}
ListTypeData::ListTypeData(const Type& element) : element(element) {}
}
ListType::ListType() : ListType(&common_internal::kDynListTypeData) {}
ListType::ListType(absl::Nonnull<google::protobuf::Arena*> arena, const Type& element)
: ListType(element.IsDyn()
? &common_internal::kDynListTypeData
: common_internal::ListTypeData::Create(arena, element)) {}
std::string ListType::DebugString() const {
return absl::StrCat("list<", element().DebugString(), ">");
}
TypeParameters ListType::GetParameters() const {
return TypeParameters(GetElement());
}
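// data_ is a tagged pointer: kBasicBit marks arena-allocated ListTypeData,
// kProtoBit marks a protobuf FieldDescriptor (presumably the element field of
// a repeated message field), and anything else decodes to the default (dyn)
// element type.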
Type ListType::GetElement() const {
ABSL_DCHECK_NE(data_, 0);
if ((data_ & kBasicBit) == kBasicBit) {
return reinterpret_cast<const common_internal::ListTypeData*>(data_ &
kPointerMask)
->element;
}
if ((data_ & kProtoBit) == kProtoBit) {
return common_internal::SingularMessageFieldType(
reinterpret_cast<const google::protobuf::FieldDescriptor*>(data_ & kPointerMask));
}
return Type();
}
Type ListType::element() const { return GetElement(); }
} | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
TEST(ListType, Default) {
ListType list_type;
EXPECT_EQ(list_type.element(), DynType());
}
TEST(ListType, Kind) {
google::protobuf::Arena arena;
EXPECT_EQ(ListType(&arena, BoolType()).kind(), ListType::kKind);
EXPECT_EQ(Type(ListType(&arena, BoolType())).kind(), ListType::kKind);
}
TEST(ListType, Name) {
google::protobuf::Arena arena;
EXPECT_EQ(ListType(&arena, BoolType()).name(), ListType::kName);
EXPECT_EQ(Type(ListType(&arena, BoolType())).name(), ListType::kName);
}
TEST(ListType, DebugString) {
google::protobuf::Arena arena;
{
std::ostringstream out;
out << ListType(&arena, BoolType());
EXPECT_EQ(out.str(), "list<bool>");
}
{
std::ostringstream out;
out << Type(ListType(&arena, BoolType()));
EXPECT_EQ(out.str(), "list<bool>");
}
}
TEST(ListType, Hash) {
google::protobuf::Arena arena;
EXPECT_EQ(absl::HashOf(ListType(&arena, BoolType())),
absl::HashOf(ListType(&arena, BoolType())));
}
TEST(ListType, Equal) {
google::protobuf::Arena arena;
EXPECT_EQ(ListType(&arena, BoolType()), ListType(&arena, BoolType()));
EXPECT_EQ(Type(ListType(&arena, BoolType())), ListType(&arena, BoolType()));
EXPECT_EQ(ListType(&arena, BoolType()), Type(ListType(&arena, BoolType())));
EXPECT_EQ(Type(ListType(&arena, BoolType())),
Type(ListType(&arena, BoolType())));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/list_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/list_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
517e8f1b-746b-423f-b004-526620287e60 | cpp | tensorflow/tensorflow | rgb_to_yuv | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.cc | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/image_utils.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rgb_to_yuv {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
constexpr float kRgb2YuvKernel[] = {0.299f, 0.587f, 0.114f,
-0.14714119f, -0.28886916f, 0.43601035f,
0.61497538f, -0.51496512f, -0.10001026f};
constexpr int kRgb2YuvKernelSize =
sizeof(kRgb2YuvKernel) / sizeof(kRgb2YuvKernel[0]);
void ComputeRgbToYuv(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* input_data = reinterpret_cast<const float*>(img->Data());
const dim_t batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
TFLITE_DCHECK(channels == 3);
MutableDataRef* output = outputs[0];
output->Resize({batches, height, width, channels});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertColorSpace(batches, height, width, input_data, output_data,
&kRgb2YuvKernel[0], kRgb2YuvKernelSize);
}
}
const Algo* Impl_RgbToYuv() {
static const Algo rgb_to_yuv = {&ComputeRgbToYuv, nullptr};
return &rgb_to_yuv;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rgb_to_yuv {
namespace {
struct Rgb2YuvTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class Rgb2YuvTest : public ::testing::TestWithParam<Rgb2YuvTestParams> {};
TEST_P(Rgb2YuvTest, FloatPixelType) {
constexpr float kAbsError = 0.1f;
const Rgb2YuvTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rgb_to_yuv = Impl_RgbToYuv();
rgb_to_yuv->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
Rgb2YuvTests, Rgb2YuvTest,
testing::ValuesIn({
Rgb2YuvTestParams{{1, 3, 2, 3},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{
92.5f,
58.3f,
-71.5f,
93.5f,
58.3f,
-71.5f,
102.5f,
58.3f,
-71.5f,
103.5f,
58.3f,
-71.5f,
112.5f,
58.3f,
-71.5f,
113.5f,
58.3f,
-71.5f,
},
{1, 3, 2, 3}},
Rgb2YuvTestParams{{2, 3, 2, 3},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232,
11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f,
92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f},
{2, 3, 2, 3}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a537f92-18a4-4555-9980-088c5e663ff9 | cpp | tensorflow/tensorflow | dot_sparsity_rewriter | third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter.cc | third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter_test.cc | #include "xla/service/gpu/transforms/dot_sparsity_rewriter.h"
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
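// Rewrites a dot whose single sparse operand sits on the right-hand side
// (sparsity index 1) into an equivalent dot with the sparsity on the left:
// operands, batch/contracting dimensions, and operand precisions are swapped,
// and a final transpose restores the original output dimension order.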
class SparseDotRewriterImpl : public DfsHloRewriteVisitor {
public:
absl::Status HandleDot(HloInstruction* instr) override {
HloDotInstruction* dot = Cast<HloDotInstruction>(instr);
if (dot->sparse_operands() != 1 || dot->sparsity().front().index() != 1) {
return absl::OkStatus();
}
HloInstruction* lhs = dot->mutable_operand(0);
HloInstruction* rhs = dot->mutable_operand(1);
HloInstruction* meta = dot->mutable_operand(2);
DotDimensionNumbers dnums = dot->dot_dimension_numbers();
std::swap(*dnums.mutable_lhs_batch_dimensions(),
*dnums.mutable_rhs_batch_dimensions());
std::swap(*dnums.mutable_lhs_contracting_dimensions(),
*dnums.mutable_rhs_contracting_dimensions());
PrecisionConfig precision_config = dot->precision_config();
std::swap(precision_config.mutable_operand_precision()->at(0),
precision_config.mutable_operand_precision()->at(1));
SparsityDescriptor sparsity = dot->sparsity().front();
sparsity.set_index(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * new_dot,
MakeDotHlo(rhs, lhs, dnums, precision_config,
dot->shape().element_type(), {std::move(sparsity)}, {meta}));
dot->SetupDerivedInstruction(new_dot);
int batch_dims = dnums.lhs_batch_dimensions().size();
int new_lhs_noncontracting = rhs->shape().rank() - batch_dims -
dnums.lhs_contracting_dimensions().size();
int new_rhs_noncontracting = lhs->shape().rank() - batch_dims -
dnums.rhs_contracting_dimensions().size();
int rank = dot->shape().rank();
DimensionVector dimensions(rank);
for (int i = 0; i < batch_dims; ++i) {
dimensions[i] = i;
}
for (int i = 0; i < new_lhs_noncontracting; ++i) {
dimensions[i + batch_dims] = i + batch_dims + new_rhs_noncontracting;
}
for (int i = 0; i < new_rhs_noncontracting; ++i) {
dimensions[i + batch_dims + new_lhs_noncontracting] = i + batch_dims;
}
TF_ASSIGN_OR_RETURN(HloInstruction * transpose,
MakeTransposeHlo(new_dot, dimensions));
transpose->set_metadata(dot->metadata());
*transpose->mutable_shape()->mutable_layout() = dot->shape().layout();
return ReplaceInstruction(dot, transpose);
}
};
}
absl::StatusOr<bool> DotSparsityRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return SparseDotRewriterImpl().RunOnModule(module, execution_threads);
}
}
} | #include "xla/service/gpu/transforms/dot_sparsity_rewriter.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
class DotSparsityRewriterTest : public HloTestBase {
public:
DotSparsityRewriterTest() : HloTestBase(true) {}
};
TEST_F(DotSparsityRewriterTest, SparseDotRhsToLhs) {
const char* module_string = R"(
HloModule m
ENTRY e {
lhs = f16[4,2,16,8,64] parameter(0)
rhs = f16[2,4,8,32,128] parameter(1)
meta = u16[2,4,8,4,128] parameter(2)
ROOT dot = f16[4,2,16,128] dot(lhs, rhs, meta),
lhs_contracting_dims={3,4}, rhs_contracting_dims={2,3},
lhs_batch_dims={0,1}, rhs_batch_dims={1,0}, sparsity=R.3@2:4
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotSparsityRewriter().Run(module.get()));
EXPECT_TRUE(modified);
const HloTransposeInstruction* transpose = DynCast<HloTransposeInstruction>(
module->entry_computation()->root_instruction());
ASSERT_TRUE(transpose != nullptr);
EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 1, 3, 2));
const HloDotInstruction* dot =
DynCast<HloDotInstruction>(transpose->operand(0));
ASSERT_TRUE(dot != nullptr);
const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
EXPECT_EQ(dnums.lhs_contracting_dimensions(0), 2);
EXPECT_EQ(dnums.lhs_contracting_dimensions(1), 3);
EXPECT_EQ(dnums.rhs_contracting_dimensions(0), 3);
EXPECT_EQ(dnums.rhs_contracting_dimensions(1), 4);
EXPECT_EQ(dnums.lhs_batch_dimensions(0), 1);
EXPECT_EQ(dnums.lhs_batch_dimensions(1), 0);
EXPECT_EQ(dnums.rhs_batch_dimensions(0), 0);
EXPECT_EQ(dnums.rhs_batch_dimensions(1), 1);
EXPECT_EQ(dot->sparse_operands(), 1);
EXPECT_EQ(dot->sparsity().front().index(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c81ba67-e18a-46de-ad5f-6fb9b38323c9 | cpp | tensorflow/tensorflow | ragged_tensor_to_variant_op | tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | tensorflow/core/kernels/ragged_tensor_to_variant_op_test.cc | #include <cstdint>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/kernels/concat_lib.h"
#include "tensorflow/core/kernels/ragged_tensor_variant.h"
#include "tensorflow/core/kernels/ragged_utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/tensor_ops_util.h"
namespace tensorflow {
namespace {
template <typename VALUE_TYPE>
Status UnbatchDenseZerothDim(
const RaggedTensorVariant& batched_ragged,
std::vector<RaggedTensorVariant>* ragged_components) {
Tensor batched_values = batched_ragged.values();
TensorShape values_shape = batched_values.shape();
if (values_shape.dims() < 1) {
return errors::InvalidArgument("Can't unbatch rank-0 tensor.");
}
auto num_components = values_shape.dim_size(0);
values_shape.RemoveDim(0);
auto num_values = values_shape.num_elements();
ragged_components->resize(num_components);
const auto& batched_flat = batched_values.flat<VALUE_TYPE>();
for (auto i = decltype(num_components){}; i < num_components; i++) {
(*ragged_components)[i].set_values(
Tensor(DataTypeToEnum<VALUE_TYPE>::value, values_shape));
auto ragged_component_values_flat =
(*ragged_components)[i].mutable_values()->flat<VALUE_TYPE>();
for (auto j = decltype(num_values){}; j < num_values; j++) {
ragged_component_values_flat(j) = batched_flat(j + i * num_values);
}
}
return absl::OkStatus();
}
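// Splits a batched ragged tensor into its per-row components along dimension
// 0, rebasing every component's nested row splits so that each starts at 0.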
template <typename VALUE_TYPE, typename SPLIT_TYPE>
Status UnbatchRaggedZerothDim(
const RaggedTensorVariant& batched_ragged,
std::vector<RaggedTensorVariant>* ragged_components) {
int ragged_rank = batched_ragged.ragged_rank();
if (ragged_rank == 0) {
return UnbatchDenseZerothDim<VALUE_TYPE>(batched_ragged, ragged_components);
}
auto batched_splits_top_vec = batched_ragged.splits(0).vec<SPLIT_TYPE>();
auto num_components = batched_splits_top_vec.size() - 1;
if (num_components < 0) {
return errors::Internal("Invalid split argument.");
}
int num_splits = ragged_rank - 1;
ragged_components->resize(num_components);
for (RaggedTensorVariant& ragged_component : *ragged_components) {
ragged_component.mutable_nested_splits()->reserve(num_splits);
}
const auto& batched_flat = batched_ragged.values().flat<VALUE_TYPE>();
auto num_inner_elems = batched_ragged.values().NumElements();
if (batched_ragged.values().dim_size(0) > 1) {
num_inner_elems /= batched_ragged.values().dim_size(0);
}
TensorShape values_shape = batched_ragged.values().shape();
if (num_splits == 0) {
for (auto i = decltype(num_components){}; i < num_components; i++) {
auto start = batched_splits_top_vec(i);
auto limit = batched_splits_top_vec(i + 1);
auto num_values = limit - start;
values_shape.set_dim(0, num_values);
(*ragged_components)[i].set_values(
Tensor(DataTypeToEnum<VALUE_TYPE>::value, values_shape));
auto ragged_component_values_flat =
(*ragged_components)[i].mutable_values()->template flat<VALUE_TYPE>();
for (auto j = decltype(num_values * num_inner_elems){};
j < num_values * num_inner_elems; j++) {
ragged_component_values_flat(j) =
batched_flat(j + start * num_inner_elems);
}
}
return absl::OkStatus();
}
std::vector<typename TTypes<SPLIT_TYPE>::ConstVec> batched_splits_vec;
batched_splits_vec.reserve(ragged_rank);
for (int i = 0; i < ragged_rank; i++) {
batched_splits_vec.push_back(batched_ragged.splits(i).vec<SPLIT_TYPE>());
}
std::vector<SPLIT_TYPE> index(num_splits, 1);
std::vector<SPLIT_TYPE> ragged_component_values_size(num_components, 0);
for (auto i = decltype(num_components){}; i < num_components; i++) {
std::vector<typename TTypes<SPLIT_TYPE>::Vec> ragged_component_splits_vec;
ragged_component_splits_vec.reserve(num_splits);
SPLIT_TYPE split_size = -1;
for (int j = 0; j < num_splits; j++) {
if (j == 0) {
split_size =
batched_splits_top_vec(i + 1) - batched_splits_top_vec(i) + 1;
} else {
SPLIT_TYPE last_index = ragged_component_splits_vec[j - 1].size() - 1;
split_size = ragged_component_splits_vec[j - 1](last_index) + 1;
}
(*ragged_components)[i].append_splits(
Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({split_size})));
ragged_component_splits_vec.push_back((*ragged_components)[i]
.mutable_splits(j)
->template vec<SPLIT_TYPE>());
SPLIT_TYPE last_split_value = batched_splits_vec[j + 1](index[j] - 1);
ragged_component_splits_vec[j](0) = 0;
for (SPLIT_TYPE k = 1; k < split_size; k++, index[j]++) {
ragged_component_splits_vec[j](k) =
batched_splits_vec[j + 1](index[j]) - last_split_value;
}
}
SPLIT_TYPE last_split_size =
ragged_component_splits_vec[num_splits - 1].size();
ragged_component_values_size[i] =
ragged_component_splits_vec[num_splits - 1](last_split_size - 1);
}
int64_t value_index = 0;
for (auto i = decltype(num_components){}; i < num_components; i++) {
SPLIT_TYPE num_values = ragged_component_values_size[i];
values_shape.set_dim(0, num_values);
(*ragged_components)[i].set_values(
Tensor(DataTypeToEnum<VALUE_TYPE>::value, values_shape));
auto ragged_component_values_flat =
(*ragged_components)[i].mutable_values()->template flat<VALUE_TYPE>();
for (int64_t j = 0; j < num_values * num_inner_elems; j++, value_index++) {
ragged_component_values_flat(j) = batched_flat(value_index);
}
}
return absl::OkStatus();
}
}
template <typename VALUE_TYPE, typename SPLIT_TYPE>
class RaggedTensorToVariantOp : public OpKernel {
public:
explicit RaggedTensorToVariantOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("batched_input", &batched_input_));
}
void Compute(OpKernelContext* context) override {
OpInputList ragged_nested_splits_in;
OP_REQUIRES_OK(context, context->input_list("rt_nested_splits",
&ragged_nested_splits_in));
const int ragged_nested_splits_len = ragged_nested_splits_in.size();
RaggedTensorVariant batched_ragged_input;
batched_ragged_input.set_values(context->input(ragged_nested_splits_len));
batched_ragged_input.mutable_nested_splits()->reserve(
ragged_nested_splits_len);
for (int i = ragged_nested_splits_len - 1; i >= 0; --i) {
SPLIT_TYPE nvals;
if (i == ragged_nested_splits_len - 1) {
OP_REQUIRES(context, batched_ragged_input.values().dims() >= 1,
errors::InvalidArgument(
"Requires flat_values to have rank>=1 when "
"nested_row_splits is not empty, but is 0."));
nvals = batched_ragged_input.values().dim_size(0);
} else {
nvals = ragged_nested_splits_in[i + 1].dim_size(0) - 1;
}
OP_REQUIRES_OK(context, RaggedTensorVerifySplits<SPLIT_TYPE>(
ragged_nested_splits_in[i], true, nvals));
}
for (int i = 0; i < ragged_nested_splits_len; i++) {
batched_ragged_input.append_splits(ragged_nested_splits_in[i]);
}
if (!batched_input_) {
Tensor* encoded_scalar;
OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({}),
&encoded_scalar));
encoded_scalar->scalar<Variant>()() = std::move(batched_ragged_input);
return;
}
std::vector<RaggedTensorVariant> unbatched_ragged_input;
OP_REQUIRES_OK(context, UnbatchRaggedZerothDim<VALUE_TYPE, SPLIT_TYPE>(
batched_ragged_input, &unbatched_ragged_input));
Tensor* encoded_vector;
int64_t output_size = unbatched_ragged_input.size();
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({output_size}),
&encoded_vector));
auto encoded_vector_t = encoded_vector->vec<Variant>();
for (auto i = decltype(output_size){}; i < output_size; i++) {
encoded_vector_t(i) = unbatched_ragged_input[i];
}
}
private:
bool batched_input_;
};
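// Gradient of RaggedTensorToVariant: concatenates the per-row value gradients
// back into a single dense_values gradient, substituting zero tensors for
// rows whose gradient variant is absent.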
template <typename VALUE_TYPE, typename SPLIT_TYPE>
class RaggedTensorToVariantGradientOp : public OpKernel {
public:
using OpKernel::OpKernel;
void Compute(OpKernelContext* context) override {
Tensor encoded_variant = context->input(0);
Tensor row_splits = context->input(1);
auto flat_row_splits = row_splits.flat<SPLIT_TYPE>();
TensorShape dense_values_shape;
OP_REQUIRES_OK(context,
TensorShapeUtils::MakeShape(context->input(2).vec<int32>(),
&dense_values_shape));
if (row_splits.dims()) {
OP_REQUIRES_OK(
context, RaggedTensorVerifySplits<SPLIT_TYPE>(row_splits, false, 0));
}
const auto& flat_variants = encoded_variant.flat<Variant>();
std::vector<Tensor> values;
for (int i = 0; i < flat_variants.size(); ++i) {
if (const auto* encoded = flat_variants(i).get<RaggedTensorVariant>()) {
values.push_back(encoded->values());
} else {
const auto value_dtype = DataTypeToEnum<VALUE_TYPE>::v();
auto piece_size = flat_row_splits(i + 1) - flat_row_splits(i);
TensorShape zeros_shape = dense_values_shape;
zeros_shape.set_dim(0, piece_size);
Tensor zero(value_dtype, zeros_shape);
zero.flat<VALUE_TYPE>().setZero();
values.push_back(zero);
}
}
if (values.size() == 1) {
context->set_output(0, values[0]);
} else {
Tensor* out = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, dense_values_shape, &out));
if (dense_values_shape.num_elements() == 0) return;
using Piece = typename TTypes<VALUE_TYPE, 2>::Matrix;
using ConstPiece = typename TTypes<VALUE_TYPE, 2>::ConstMatrix;
std::vector<std::unique_ptr<ConstPiece>> pieces;
pieces.reserve(values.size());
for (const Tensor& t : values) {
if (t.NumElements() == 0) continue;
pieces.emplace_back(
new ConstPiece(t.shaped<VALUE_TYPE, 2>({1, t.NumElements()})));
}
Piece out_flat =
out->shaped<VALUE_TYPE, 2>({1, dense_values_shape.num_elements()});
ConcatCPU<VALUE_TYPE>(context->device(), pieces, &out_flat);
}
}
};
#define REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, split_type) \
REGISTER_KERNEL_BUILDER(Name("RaggedTensorToVariant") \
.Device(DEVICE_CPU) \
.TypeConstraint<value_type>("Tvalues") \
.TypeConstraint<split_type>("Tsplits"), \
RaggedTensorToVariantOp<value_type, split_type>); \
REGISTER_KERNEL_BUILDER( \
Name("RaggedTensorToVariantGradient") \
.Device(DEVICE_CPU) \
.TypeConstraint<value_type>("Tvalues") \
.TypeConstraint<split_type>("Tsplits"), \
RaggedTensorToVariantGradientOp<value_type, split_type>);
#define REGISTER_KERNELS(value_type) \
REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \
REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64_t)
TF_CALL_POD_TYPES(REGISTER_KERNELS);
TF_CALL_tstring(REGISTER_KERNELS);
TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
TF_CALL_quint16(REGISTER_KERNELS);
TF_CALL_qint16(REGISTER_KERNELS);
#undef REGISTER_KERNELS
#undef REGISTER_KERNELS_WITH_SPLIT_TYPE
} | #include "tensorflow/core/kernels/ragged_tensor_to_variant_op_test.h"
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ragged_tensor_variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
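// Unbatching a two-level ragged tensor with no values: each of the three
// outer rows becomes its own variant carrying only its (empty) inner splits.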
TEST_F(RaggedTensorToVariantKernelTest, NoValuesInput) {
const std::vector<int64_t> batched_splits_1 = {0, 2, 3, 3};
const std::vector<int64_t> batched_splits_2 = {0, 0, 0, 0};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{batched_splits_1, batched_splits_2}, TensorShape({0}), {}, true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 3);
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0, 0, 0}}, {}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0, 0}}, {}),
*encoded_list(1).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0}}, {}),
*encoded_list(2).get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, 1DValuesRaggedRankOneInput) {
const std::vector<int64_t> batched_splits = {0, 3, 3, 5, 6};
const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6};
BuildEncodeRaggedTensorGraph<int, int64_t>({batched_splits}, TensorShape({6}),
batched_values, true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 4);
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {1, 2, 3}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {}),
*encoded_list(1).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {4, 5}),
*encoded_list(2).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {6}),
*encoded_list(3).get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, 2DBatchedValuesRankOneInput) {
const std::vector<int64_t> batched_splits = {0, 1, 2, 3};
const std::vector<int> batched_values = {1, 2, 4, 5, 6, 7};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{batched_splits}, TensorShape({3, 2}), batched_values, true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 3);
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {1, 2}, {1, 2}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {1, 2}, {4, 5}),
*encoded_list(1).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {1, 2}, {6, 7}),
*encoded_list(2).get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, 2DBatchedValuesRankTwoInput) {
const std::vector<int64_t> batched_splits_1 = {0, 1, 2};
const std::vector<int64_t> batched_splits_2 = {0, 2, 3};
const std::vector<int> batched_values = {1, 2, 4, 5, 6, 7};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{batched_splits_1, batched_splits_2}, TensorShape({3, 2}), batched_values,
true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 2);
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0, 2}}, {2, 2}, {1, 2, 4, 5}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0, 1}}, {1, 2}, {6, 7}),
*encoded_list(1).get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, EmptyRowInBatchedInput) {
const std::vector<int64_t> batched_splits_1 = {0, 3, 3, 5, 7};
const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 3, 8, 11, 11, 15};
const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{batched_splits_1, batched_splits_2}, TensorShape({15}), batched_values,
true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 4);
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0, 1, 3, 3}}, {1, 2, 3}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0}}, {}),
*encoded_list(1).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0, 5, 8}},
{4, 5, 6, 7, 8, 9, 10, 11}),
*encoded_list(2).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({{0, 0, 4}}, {12, 13, 14, 15}),
*encoded_list(3).get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, NonEmptyBatchedInput) {
const std::vector<int64_t> batched_splits_1 = {0, 5, 10};
const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 4, 5, 6,
7, 8, 9, 10, 11};
const std::vector<int64_t> batched_splits_3 = {0, 2, 3, 4, 5, 6,
7, 8, 9, 12, 13, 14};
const std::vector<int> batched_values = {0, 1, 1, 2, 2, 3, 4,
5, 6, 7, 8, 9, 8, 9};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{batched_splits_1, batched_splits_2, batched_splits_3}, TensorShape({14}),
batched_values, true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 2);
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>(
{{0, 1, 3, 4, 5, 6}, {0, 2, 3, 4, 5, 6, 7}}, {0, 1, 1, 2, 2, 3, 4}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>(
{{0, 1, 2, 3, 4, 5}, {0, 1, 2, 5, 6, 7}}, {5, 6, 7, 8, 9, 8, 9}),
*encoded_list(1).get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, NonEmptyBatchedInputInt32Splits) {
const std::vector<int> batched_splits_1 = {0, 5, 10};
const std::vector<int> batched_splits_2 = {0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11};
const std::vector<int> batched_splits_3 = {0, 2, 3, 4, 5, 6,
7, 8, 9, 12, 13, 14};
const std::vector<int> batched_values = {0, 1, 1, 2, 2, 3, 4,
5, 6, 7, 8, 9, 8, 9};
BuildEncodeRaggedTensorGraph<int, int32>(
{batched_splits_1, batched_splits_2, batched_splits_3}, TensorShape({14}),
batched_values, true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 2);
ExpectRaggedTensorVariantEqual<int, int32>(
CreateVariantFromRagged<int, int32>(
{{0, 1, 3, 4, 5, 6}, {0, 2, 3, 4, 5, 6, 7}}, {0, 1, 1, 2, 2, 3, 4}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int32>(
CreateVariantFromRagged<int, int32>(
{{0, 1, 2, 3, 4, 5}, {0, 1, 2, 5, 6, 7}}, {5, 6, 7, 8, 9, 8, 9}),
*encoded_list(1).get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, NonBatchInput) {
const std::vector<int64_t> batched_splits_1 = {0, 3, 3, 5, 7};
const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 3, 8, 11, 11, 15};
const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{batched_splits_1, batched_splits_2}, TensorShape({15}), batched_values,
false);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_scalar = GetOutput(0)->scalar<Variant>()();
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>(
{batched_splits_1, batched_splits_2}, batched_values),
*encoded_scalar.get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, ShapeFnTestBatched) {
ShapeInferenceTestOp op("RaggedTensorToVariant");
(*op.node_def.mutable_attr())["Tvalues"].set_type(DT_INT32);
(*op.node_def.mutable_attr())["batched_input"].set_b(true);
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(0);
INFER_OK(op, "?", "[?]");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(1);
INFER_OK(op, "?;?", "[?]");
INFER_OK(op, "?;[?]", "[?]");
INFER_OK(op, "?;[?,?]", "[?]");
INFER_OK(op, "[?];[5]", "[?]");
INFER_OK(op, "[?];[5,2]", "[?]");
INFER_OK(op, "[5];[5,2]", "[4]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(2);
INFER_OK(op, "?;?;?", "[?]");
INFER_OK(op, "?;?;[?]", "[?]");
INFER_OK(op, "?;?;[?,?]", "[?]");
INFER_OK(op, "[?];[?];[5]", "[?]");
INFER_OK(op, "[?];[?];[5,2]", "[?]");
INFER_OK(op, "[6];[?];[5,2]", "[5]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(3);
INFER_OK(op, "?;?;?;?", "[?]");
INFER_OK(op, "?;?;?;[?]", "[?]");
INFER_OK(op, "?;?;?;[5]", "[?]");
INFER_OK(op, "[4];?;?;[5]", "[3]");
}
TEST_F(RaggedTensorToVariantKernelTest, ShapeFnTestNotBatched) {
ShapeInferenceTestOp op("RaggedTensorToVariant");
(*op.node_def.mutable_attr())["Tvalues"].set_type(DT_INT32);
(*op.node_def.mutable_attr())["batched_input"].set_b(false);
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(0);
INFER_OK(op, "?", "[]");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(1);
INFER_OK(op, "?;?", "[]");
INFER_OK(op, "?;[?]", "[]");
INFER_OK(op, "?;[?,?]", "[]");
INFER_OK(op, "[?];[5]", "[]");
INFER_OK(op, "[?];[5,2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(2);
INFER_OK(op, "?;?;?", "[]");
INFER_OK(op, "?;?;[?]", "[]");
INFER_OK(op, "?;?;[?,?]", "[]");
INFER_OK(op, "[?];[?];[5]", "[]");
INFER_OK(op, "[?];[?];[5,2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(3);
INFER_OK(op, "?;?;?;?", "[]");
INFER_OK(op, "?;?;?;[?]", "[]");
INFER_OK(op, "?;?;?;[5]", "[]");
}
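// An empty nested-splits list encodes a plain (non-ragged) tensor; with
// batched_input=true the values are split into variants along dimension 0.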
TEST_F(RaggedTensorToVariantKernelTest, NonRaggedInput) {
const std::vector<int> values = {1, 2, 3, 4, 5, 6};
BuildEncodeRaggedTensorGraph<int, int64_t>({}, TensorShape({6}), values,
false);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_scalar = GetOutput(0)->scalar<Variant>()();
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, values),
*encoded_scalar.get<RaggedTensorVariant>());
}
TEST_F(RaggedTensorToVariantKernelTest, NonRaggedBatchedInput) {
TensorShape shape({2, 3, 2});
const std::vector<int> values = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
BuildEncodeRaggedTensorGraph<int, int64_t>({}, shape, values, true);
TF_ASSERT_OK(RunOpKernel());
const auto& encoded_list = GetOutput(0)->vec<Variant>();
EXPECT_EQ(encoded_list.size(), 2);
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {3, 2}, {1, 2, 3, 4, 5, 6}),
*encoded_list(0).get<RaggedTensorVariant>());
ExpectRaggedTensorVariantEqual<int, int64_t>(
CreateVariantFromRagged<int, int64_t>({}, {3, 2}, {7, 8, 9, 10, 11, 12}),
*encoded_list(1).get<RaggedTensorVariant>());
}
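// The remaining encoder tests exercise row-splits validation: the first
// element must be 0, elements must be monotonically increasing, and the
// last element must equal the number of flat values.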
TEST_F(RaggedTensorToVariantKernelTest,
NestedRowSplitsFirstElementNotZeroError) {
const std::vector<int64_t> splits = {1, 2};
BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({0}), {},
true);
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: first element of "
"ragged splits must be 0 but is 1"));
}
TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsIncreasingError) {
const std::vector<int64_t> splits = {0, 2, -1};
BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({0}), {},
true);
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: ragged splits must be "
"monotonically increasing, but "
"ragged_splits[2]=-1 is smaller than "
"row_splits[1]=2"));
}
TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsSizeMismatchError) {
const std::vector<int64_t> splits = {0, 2, 3};
BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({5}),
{0, 1, 2, 3, 4}, true);
EXPECT_THAT(
RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: last element of ragged splits "
"must be the number of ragged values(5) but is 3"));
}
TEST_F(RaggedTensorToVariantKernelTest,
NestedRowSplitsInnerDimensionSizeMismatchError) {
const std::vector<int64_t> splits1 = {0, 2, 3};
const std::vector<int64_t> splits2 = {0, 3, 3, 4};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{splits1, splits2}, TensorShape({5}), {0, 1, 2, 3, 4}, true);
EXPECT_THAT(
RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: last element of ragged splits "
"must be the number of ragged values(5) but is 4"));
}
TEST_F(RaggedTensorToVariantKernelTest,
NestedRowSplitsSizeOfSplitsMismatchError) {
const std::vector<int64_t> splits1 = {0, 2};
const std::vector<int64_t> splits2 = {0, 3, 3, 5};
BuildEncodeRaggedTensorGraph<int, int64_t>(
{splits1, splits2}, TensorShape({5}), {0, 1, 2, 3, 4}, true);
EXPECT_THAT(
RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: last element of ragged splits "
"must be the number of ragged values(3) but is 2"));
}
TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsEmptySplitsError) {
const std::vector<int64_t> splits = {};
BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({5}),
{0, 1, 2, 3, 4}, true);
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: ragged splits must "
"have at least one splits, but is empty"));
}
TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsScalarValueError) {
const std::vector<int64_t> splits = {0, 2};
  BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({}), {1},
true);
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Requires flat_values to have rank>=1 when "
"nested_row_splits is not empty, but is 0."));
}
TEST_F(RaggedTensorToVariantGradientKernelTest, RowSplitsMatch) {
auto encoded_variant_grad_1 =
CreateVariantFromRagged<int, int64_t>({}, {3}, {1, 2, 3});
auto encoded_variant_grad_2 =
CreateVariantFromRagged<int, int64_t>({}, {0}, {});
auto encoded_variant_grad_3 =
CreateVariantFromRagged<int, int64_t>({}, {2}, {4, 5});
auto encoded_variant_grad_4 =
CreateVariantFromRagged<int, int64_t>({}, {1}, {6});
BuildEncodeRaggedTensorGradientGraph<int, int64_t>(
{encoded_variant_grad_1, encoded_variant_grad_2, encoded_variant_grad_3,
encoded_variant_grad_4},
{0, 3, 3, 5, 6}, {6});
TF_ASSERT_OK(RunOpKernel());
}
TEST_F(RaggedTensorToVariantGradientKernelTest,
RowSplitsFirstElementNotZeroError) {
auto encoded_variant_grad_1 =
CreateVariantFromRagged<int, int64_t>({}, {3}, {1, 2, 3});
auto encoded_variant_grad_2 =
CreateVariantFromRagged<int, int64_t>({}, {0}, {});
auto encoded_variant_grad_3 =
CreateVariantFromRagged<int, int64_t>({}, {2}, {4, 5});
auto encoded_variant_grad_4 =
CreateVariantFromRagged<int, int64_t>({}, {1}, {6});
BuildEncodeRaggedTensorGradientGraph<int, int64_t>(
{encoded_variant_grad_1, encoded_variant_grad_2, encoded_variant_grad_3,
encoded_variant_grad_4},
{1, 3, 3, 5, 6}, {6});
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: first element of "
"ragged splits must be 0 but is 1"));
}
TEST_F(RaggedTensorToVariantGradientKernelTest, RowSplitsIncreasingError) {
auto encoded_variant_grad_1 =
CreateVariantFromRagged<int, int64_t>({}, {3}, {1, 2, 3});
auto encoded_variant_grad_2 =
CreateVariantFromRagged<int, int64_t>({}, {0}, {});
auto encoded_variant_grad_3 =
CreateVariantFromRagged<int, int64_t>({}, {2}, {4, 5});
auto encoded_variant_grad_4 =
CreateVariantFromRagged<int, int64_t>({}, {1}, {6});
BuildEncodeRaggedTensorGradientGraph<int, int64_t>(
{encoded_variant_grad_1, encoded_variant_grad_2, encoded_variant_grad_3,
encoded_variant_grad_4},
{0, 3, 2, 5, 6}, {6});
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(error::INVALID_ARGUMENT,
"Invalid ragged splits: ragged splits must be "
"monotonically increasing, but "
"ragged_splits[2]=2 is smaller than "
"row_splits[1]=3"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_variant_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ead042dd-f68b-44ed-b69a-5679dbf2156e | cpp | tensorflow/tensorflow | execution_context | third_party/xla/xla/ffi/execution_context.cc | third_party/xla/xla/ffi/execution_context_test.cc | #include "xla/ffi/execution_context.h"
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
namespace xla::ffi {
ExecutionContext::UserData::UserData(void* data, Deleter<void> deleter)
: data_(data), deleter_(std::move(deleter)) {}
ExecutionContext::UserData::~UserData() {
if (deleter_) deleter_(data_);
}
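// Type-erased insertion: wrap the opaque pointer and its deleter in a
// UserData entry owned by the context, keyed by the value's type id.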
absl::Status ExecutionContext::Insert(TypeId type_id, void* data,
Deleter<void> deleter) {
return InsertUserData(type_id,
std::make_unique<UserData>(data, std::move(deleter)));
}
absl::Status ExecutionContext::InsertUserData(TypeId type_id,
std::unique_ptr<UserData> data) {
if (!data) return absl::InvalidArgumentError("User data must be not null");
auto emplaced = user_data_.emplace(type_id, std::move(data));
if (!emplaced.second) {
return absl::AlreadyExistsError(
absl::StrCat("User data with type id ", type_id.value(),
" already exists in execution context"));
}
return absl::OkStatus();
}
absl::StatusOr<ExecutionContext::UserData*> ExecutionContext::LookupUserData(
TypeId type_id) const {
auto it = user_data_.find(type_id);
if (it == user_data_.end()) {
return absl::NotFoundError(absl::StrCat("User data with type id ",
type_id.value(),
" not found in execution context"));
}
return it->second.get();
}
} | #include "xla/ffi/execution_context.h"
#include <cstdint>
#include <string>
#include "absl/status/status.h"
#include "xla/ffi/type_id_registry.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::ffi {
struct I32UserData {
explicit I32UserData(int32_t value) : value(value) {}
int32_t value;
};
struct StrUserData {
explicit StrUserData(std::string value) : value(value) {}
std::string value;
};
TEST(ExecutionContextTest, EmplaceUserData) {
ExecutionContext context;
TF_ASSERT_OK(context.Emplace<I32UserData>(42));
TF_ASSERT_OK(context.Emplace<StrUserData>("hello"));
TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup<I32UserData>());
TF_ASSERT_OK_AND_ASSIGN(auto* str_data, context.Lookup<StrUserData>());
ASSERT_NE(i32_data, nullptr);
ASSERT_NE(str_data, nullptr);
ASSERT_EQ(i32_data->value, 42);
ASSERT_EQ(str_data->value, "hello");
}
TEST(ExecutionContextTest, InsertUserOwned) {
I32UserData user_data(42);
ExecutionContext context;
TF_ASSERT_OK(context.Insert(&user_data));
TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup<I32UserData>());
ASSERT_EQ(i32_data, &user_data);
}
TEST(ExecutionContextTest, InsertUserOwnedWithTypeId) {
TF_ASSERT_OK_AND_ASSIGN(
TypeIdRegistry::TypeId type_id,
TypeIdRegistry::RegisterExternalTypeId("I32UserData"));
I32UserData user_data(42);
ExecutionContext context;
TF_ASSERT_OK(context.Insert(type_id, &user_data));
TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup(type_id));
ASSERT_EQ(i32_data, &user_data);
}
TEST(ExecutionContextTest, UserDataNotFound) {
ExecutionContext context;
auto i32_data = context.Lookup<I32UserData>();
ASSERT_EQ(i32_data.status().code(), absl::StatusCode::kNotFound);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/execution_context.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/execution_context_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0814523e-f546-4e68-82e3-41d15849af8a | cpp | abseil/abseil-cpp | errno_saver | absl/base/internal/errno_saver.h | absl/base/internal/errno_saver_test.cc | #ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
#include <cerrno>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
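// RAII helper that captures errno at construction and restores it at
// destruction; operator() returns the saved value.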
class ErrnoSaver {
public:
ErrnoSaver() : saved_errno_(errno) {}
~ErrnoSaver() { errno = saved_errno_; }
int operator()() const { return saved_errno_; }
private:
const int saved_errno_;
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/base/internal/errno_saver.h"
#include <cerrno>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/internal/strerror.h"
namespace {
using ::testing::Eq;
struct ErrnoPrinter {
int no;
};
std::ostream &operator<<(std::ostream &os, ErrnoPrinter ep) {
return os << absl::base_internal::StrError(ep.no) << " [" << ep.no << "]";
}
bool operator==(ErrnoPrinter one, ErrnoPrinter two) { return one.no == two.no; }
TEST(ErrnoSaverTest, Works) {
errno = EDOM;
{
absl::base_internal::ErrnoSaver errno_saver;
EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM}));
errno = ERANGE;
EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{ERANGE}));
EXPECT_THAT(ErrnoPrinter{errno_saver()}, Eq(ErrnoPrinter{EDOM}));
}
EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM}));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/errno_saver.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/errno_saver_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
77bb6f69-f3ad-456a-9ead-66283c2d0924 | cpp | abseil/abseil-cpp | mutex | absl/synchronization/mutex.cc | absl/synchronization/mutex_test.cc | #include "absl/synchronization/mutex.h"
#ifdef _WIN32
#include <windows.h>
#ifdef ERROR
#undef ERROR
#endif
#else
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <sys/time.h>
#endif
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <thread>
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/low_level_alloc.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/optimization.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/synchronization/internal/graphcycles.h"
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/time/time.h"
using absl::base_internal::CurrentThreadIdentityIfPresent;
using absl::base_internal::CycleClock;
using absl::base_internal::PerThreadSynch;
using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using absl::synchronization_internal::GraphCycles;
using absl::synchronization_internal::GraphId;
using absl::synchronization_internal::InvalidGraphId;
using absl::synchronization_internal::KernelTimeout;
using absl::synchronization_internal::PerThreadSem;
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
std::this_thread::yield();
}
}
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
#if defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
#endif
ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
const char* msg, const void* obj, int64_t wait_cycles)>
mutex_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
cond_var_tracer;
}
static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock);
void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles)) {
mutex_tracer.Store(fn);
}
void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
cond_var_tracer.Store(fn);
}
namespace {
enum DelayMode { AGGRESSIVE, GENTLE };
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
absl::once_flag once;
std::atomic<int> spinloop_iterations{0};
int32_t mutex_sleep_spins[2] = {};
absl::Duration mutex_sleep_time;
};
ABSL_CONST_INIT static MutexGlobals globals;
absl::Duration MeasureTimeToYield() {
absl::Time before = absl::Now();
ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
return absl::Now() - before;
}
const MutexGlobals& GetMutexGlobals() {
absl::base_internal::LowLevelCallOnce(&globals.once, [&]() {
if (absl::base_internal::NumCPUs() > 1) {
globals.mutex_sleep_spins[AGGRESSIVE] = 5000;
globals.mutex_sleep_spins[GENTLE] = 250;
globals.mutex_sleep_time = absl::Microseconds(10);
} else {
globals.mutex_sleep_spins[AGGRESSIVE] = 0;
globals.mutex_sleep_spins[GENTLE] = 0;
globals.mutex_sleep_time = MeasureTimeToYield() * 5;
globals.mutex_sleep_time =
std::min(globals.mutex_sleep_time, absl::Milliseconds(1));
globals.mutex_sleep_time =
std::max(globals.mutex_sleep_time, absl::Microseconds(10));
}
});
return globals;
}
}
namespace synchronization_internal {
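// Spin/yield/sleep backoff for lock retry loops: the first `limit` calls in
// the given mode just bump the counter (the caller keeps spinning), the next
// call yields the CPU, and later calls sleep; returns the updated counter.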
int MutexDelay(int32_t c, int mode) {
const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
if (c < limit) {
c++;
} else {
SchedulingGuard::ScopedEnable enable_rescheduling;
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
if (c == limit) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
c++;
} else {
absl::SleepFor(sleep_time);
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
return c;
}
}
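// Atomically set (*pv |= bits) if none of the bits in wait_until_clear are
// set, spinning until that holds; returns false if the requested bits were
// already set, true once this call set them.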
static bool AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
intptr_t wait_until_clear) {
for (;;) {
intptr_t v = pv->load(std::memory_order_relaxed);
if ((v & bits) == bits) {
return false;
}
if ((v & wait_until_clear) != 0) {
continue;
}
if (pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
std::memory_order_relaxed)) {
return true;
}
}
}
ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static GraphCycles* deadlock_graph
ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
namespace {
enum {
SYNCH_EV_TRYLOCK_SUCCESS,
SYNCH_EV_TRYLOCK_FAILED,
SYNCH_EV_READERTRYLOCK_SUCCESS,
SYNCH_EV_READERTRYLOCK_FAILED,
SYNCH_EV_LOCK,
SYNCH_EV_LOCK_RETURNING,
SYNCH_EV_READERLOCK,
SYNCH_EV_READERLOCK_RETURNING,
SYNCH_EV_UNLOCK,
SYNCH_EV_READERUNLOCK,
SYNCH_EV_WAIT,
SYNCH_EV_WAIT_RETURNING,
SYNCH_EV_SIGNAL,
SYNCH_EV_SIGNALALL,
};
enum {
SYNCH_F_R = 0x01,
SYNCH_F_LCK = 0x02,
SYNCH_F_TRY = 0x04,
SYNCH_F_UNLOCK = 0x08,
SYNCH_F_LCK_W = SYNCH_F_LCK,
SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
};
}
static const struct {
int flags;
const char* msg;
} event_properties[] = {
{SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
{0, "TryLock failed "},
{SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
{0, "ReaderTryLock failed "},
{0, "Lock blocking "},
{SYNCH_F_LCK_W, "Lock returning "},
{0, "ReaderLock blocking "},
{SYNCH_F_LCK_R, "ReaderLock returning "},
{SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
{SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
{0, "Wait on "},
{0, "Wait unblocked "},
{0, "Signal on "},
{0, "SignalAll on "},
};
ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
static constexpr uint32_t kNSynchEvent = 1031;
static struct SynchEvent {
int refcount ABSL_GUARDED_BY(synch_event_mu);
SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
uintptr_t masked_addr;
void (*invariant)(void* arg);
void* arg;
bool log;
char name[1];
}* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
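// Find the SynchEvent for the synchronization word at addr, or allocate one
// bound to `name`, setting `bits` in *addr while `lockbit` is clear. The
// returned event carries a reference released via UnrefSynchEvent().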
static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
const char* name, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
synch_event_mu.Lock();
constexpr size_t kMaxSynchEventCount = 100 << 10;
static size_t synch_event_count ABSL_GUARDED_BY(synch_event_mu);
if (++synch_event_count > kMaxSynchEventCount) {
synch_event_count = 0;
ABSL_RAW_LOG(ERROR,
"Accumulated %zu Mutex debug objects. If you see this"
" in production, it may mean that the production code"
" accidentally calls "
"Mutex/CondVar::EnableDebugLog/EnableInvariantDebugging.",
kMaxSynchEventCount);
for (auto*& head : synch_event) {
for (auto* e = head; e != nullptr;) {
SynchEvent* next = e->next;
if (--(e->refcount) == 0) {
base_internal::LowLevelAlloc::Free(e);
}
e = next;
}
head = nullptr;
}
}
SynchEvent* e = nullptr;
if (!AtomicSetBits(addr, bits, lockbit)) {
for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
e = e->next) {
}
}
if (e == nullptr) {
if (name == nullptr) {
name = "";
}
size_t l = strlen(name);
e = reinterpret_cast<SynchEvent*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
e->refcount = 2;
e->masked_addr = base_internal::HidePtr(addr);
e->invariant = nullptr;
e->arg = nullptr;
e->log = false;
strcpy(e->name, name);
e->next = synch_event[h];
synch_event[h] = e;
} else {
e->refcount++;
}
synch_event_mu.Unlock();
return e;
}
static void UnrefSynchEvent(SynchEvent* e) {
if (e != nullptr) {
synch_event_mu.Lock();
bool del = (--(e->refcount) == 0);
synch_event_mu.Unlock();
if (del) {
base_internal::LowLevelAlloc::Free(e);
}
}
}
static SynchEvent* GetSynchEvent(const void* addr) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
SynchEvent* e;
synch_event_mu.Lock();
for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
e = e->next) {
}
if (e != nullptr) {
e->refcount++;
}
synch_event_mu.Unlock();
return e;
}
static void PostSynchEvent(void* obj, int ev) {
SynchEvent* e = GetSynchEvent(obj);
if (e == nullptr || e->log) {
void* pcs[40];
int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
char buffer[ABSL_ARRAYSIZE(pcs) * 24];
int pos = snprintf(buffer, sizeof(buffer), " @");
for (int i = 0; i != n; i++) {
int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
" %p", pcs[i]);
if (b < 0 ||
static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
break;
}
pos += b;
}
ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
(e == nullptr ? "" : e->name), buffer);
}
const int flags = event_properties[ev].flags;
if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
struct local {
static bool pred(SynchEvent* ev) {
(*ev->invariant)(ev->arg);
return false;
}
};
Condition cond(&local::pred, e);
Mutex* mu = static_cast<Mutex*>(obj);
const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
const bool trylock = (flags & SYNCH_F_TRY) != 0;
const bool read_lock = (flags & SYNCH_F_R) != 0;
EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
}
UnrefSynchEvent(e);
}
struct SynchWaitParams {
SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
KernelTimeout timeout_arg, Mutex* cvmu_arg,
PerThreadSynch* thread_arg,
std::atomic<intptr_t>* cv_word_arg)
: how(how_arg),
cond(cond_arg),
timeout(timeout_arg),
cvmu(cvmu_arg),
thread(thread_arg),
cv_word(cv_word_arg),
contention_start_cycles(CycleClock::Now()),
should_submit_contention_data(false) {}
const Mutex::MuHow how;
const Condition* cond;
KernelTimeout timeout;
Mutex* const cvmu;
PerThreadSynch* const thread;
std::atomic<intptr_t>* cv_word;
int64_t contention_start_cycles;
bool should_submit_contention_data;
};
struct SynchLocksHeld {
int n;
bool overflow;
struct {
Mutex* mu;
int32_t count;
GraphId id;
} locks[40];
};
static PerThreadSynch* const kPerThreadSynchNull =
reinterpret_cast<PerThreadSynch*>(1);
static SynchLocksHeld* LocksHeldAlloc() {
SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
ret->n = 0;
ret->overflow = false;
return ret;
}
static PerThreadSynch* Synch_GetPerThread() {
ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
return &identity->per_thread_synch;
}
static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
PerThreadSynch* w = Synch_GetPerThread();
if (mu) {
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
return w;
}
static SynchLocksHeld* Synch_GetAllLocks() {
PerThreadSynch* s = Synch_GetPerThread();
if (s->all_locks == nullptr) {
s->all_locks = LocksHeldAlloc();
}
return s->all_locks;
}
void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
static_cast<void>(mu);
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
PerThreadSem::Post(w->thread_identity());
ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
static_cast<void>(mu);
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
assert(w == Synch_GetPerThread());
static_cast<void>(w);
bool res = PerThreadSem::Wait(t);
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
return res;
}
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
if (identity != nullptr) {
identity->per_thread_synch.suppress_fatal_errors = true;
}
synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
std::memory_order_release);
}
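// Layout of the Mutex word mu_: the low byte (kMuLow) holds the state flags
// below, and the high bits hold either the reader count (in kMuOne
// increments) or, when kMuWait is set, a pointer to the head of the waiter
// queue.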
static const intptr_t kMuReader = 0x0001L;
static const intptr_t kMuDesig = 0x0002L;
static const intptr_t kMuWait = 0x0004L;
static const intptr_t kMuWriter = 0x0008L;
static const intptr_t kMuEvent = 0x0010L;
static const intptr_t kMuWrWait = 0x0020L;
static const intptr_t kMuSpin = 0x0040L;
static const intptr_t kMuLow = 0x00ffL;
static const intptr_t kMuHigh = ~kMuLow;
static_assert((0xab & (kMuWriter | kMuReader)) == (kMuWriter | kMuReader),
"The debug allocator's uninitialized pattern (0xab) must be an "
"invalid mutex state");
static_assert((0xcd & (kMuWriter | kMuReader)) == (kMuWriter | kMuReader),
"The debug allocator's freed pattern (0xcd) must be an invalid "
"mutex state");
enum {
kGdbMuSpin = kMuSpin,
kGdbMuEvent = kMuEvent,
kGdbMuWait = kMuWait,
kGdbMuWriter = kMuWriter,
kGdbMuDesig = kMuDesig,
kGdbMuWrWait = kMuWrWait,
kGdbMuReader = kMuReader,
kGdbMuLow = kMuLow,
};
static const intptr_t kMuOne = 0x0100;
static const int kMuHasBlocked = 0x01;
static const int kMuIsCond = 0x02;
static const int kMuIsFer = 0x04;
static_assert(PerThreadSynch::kAlignment > kMuLow,
"PerThreadSynch::kAlignment must be greater than kMuLow");
struct MuHowS {
intptr_t fast_need_zero;
intptr_t fast_or;
intptr_t fast_add;
intptr_t slow_need_zero;
intptr_t slow_inc_need_zero;
};
static const MuHowS kSharedS = {
kMuWriter | kMuWait | kMuEvent,
kMuReader,
kMuOne,
kMuWriter | kMuWait,
kMuSpin | kMuWriter | kMuWrWait,
};
static const MuHowS kExclusiveS = {
kMuWriter | kMuReader | kMuEvent,
kMuWriter,
0,
kMuWriter | kMuReader,
~static_cast<intptr_t>(0),
};
static const Mutex::MuHow kShared = &kSharedS;
static const Mutex::MuHow kExclusive = &kExclusiveS;
#ifdef NDEBUG
static constexpr bool kDebugMode = false;
#else
static constexpr bool kDebugMode = true;
#endif
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
static unsigned TsanFlags(Mutex::MuHow how) {
return how == kShared ? __tsan_mutex_read_lock : 0;
}
#endif
#if defined(__APPLE__) || defined(ABSL_BUILD_DLL)
Mutex::~Mutex() { Dtor(); }
#endif
#if !defined(NDEBUG) || defined(ABSL_HAVE_THREAD_SANITIZER)
void Mutex::Dtor() {
if (kDebugMode) {
this->ForgetDeadlockInfo();
}
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
#endif
void Mutex::EnableDebugLog(const char* name) {
ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();
SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
ABSL_ATTRIBUTE_UNUSED volatile auto dtor = &Mutex::Dtor;
ABSL_ANNOTATE_IGNORE_WRITES_END();
}
void EnableMutexInvariantDebugging(bool enabled) {
synch_check_invariants.store(enabled, std::memory_order_release);
}
void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();
if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) {
SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
e->invariant = invariant;
e->arg = arg;
UnrefSynchEvent(e);
}
ABSL_ANNOTATE_IGNORE_WRITES_END();
}
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
synch_deadlock_detection.store(mode, std::memory_order_release);
}
static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
}
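// Return the last waiter in x's "skip chain" of equivalent waiters,
// path-compressing the skip pointers visited along the way.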
static PerThreadSynch* Skip(PerThreadSynch* x) {
PerThreadSynch* x0 = nullptr;
PerThreadSynch* x1 = x;
PerThreadSynch* x2 = x->skip;
if (x2 != nullptr) {
while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
x0->skip = x2;
}
x->skip = x1;
}
return x1;
}
static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
if (ancestor->skip == to_be_removed) {
if (to_be_removed->skip != nullptr) {
ancestor->skip = to_be_removed->skip;
} else if (ancestor->next != to_be_removed) {
ancestor->skip = ancestor->next;
} else {
ancestor->skip = nullptr;
}
}
}
static void CondVarEnqueue(SynchWaitParams* waitp);
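// Enqueue waitp->thread on the Mutex waiter queue whose most-recent arrival
// is head (or on a condition variable's queue when waitp->cv_word is set).
// Higher-priority waiters may be inserted ahead of lower-priority ones.
// Returns the new queue head.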
static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
intptr_t mu, int flags) {
if (waitp->cv_word != nullptr) {
CondVarEnqueue(waitp);
return head;
}
PerThreadSynch* s = waitp->thread;
ABSL_RAW_CHECK(
s->waitp == nullptr ||
s->waitp == waitp ||
s->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
s->waitp = waitp;
s->skip = nullptr;
s->may_skip = true;
s->wake = false;
s->cond_waiter = ((flags & kMuIsCond) != 0);
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
if ((flags & kMuIsFer) == 0) {
assert(s == Synch_GetPerThread());
int64_t now_cycles = CycleClock::Now();
if (s->next_priority_read_cycles < now_cycles) {
int policy;
struct sched_param param;
const int err = pthread_getschedparam(pthread_self(), &policy, ¶m);
if (err != 0) {
ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
} else {
s->priority = param.sched_priority;
s->next_priority_read_cycles =
now_cycles + static_cast<int64_t>(CycleClock::Frequency());
}
}
}
#endif
if (head == nullptr) {
s->next = s;
s->readers = mu;
s->maybe_unlocking = false;
head = s;
} else {
PerThreadSynch* enqueue_after = nullptr;
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
if (s->priority > head->priority) {
if (!head->maybe_unlocking) {
PerThreadSynch* advance_to = head;
do {
enqueue_after = advance_to;
advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
} else if (waitp->how == kExclusive && waitp->cond == nullptr) {
enqueue_after = head;
}
}
#endif
if (enqueue_after != nullptr) {
s->next = enqueue_after->next;
enqueue_after->next = s;
ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
MuEquivalentWaiter(enqueue_after, s),
"Mutex Enqueue failure");
if (enqueue_after != head && enqueue_after->may_skip &&
MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
enqueue_after->skip = enqueue_after->next;
}
if (MuEquivalentWaiter(s, s->next)) {
s->skip = s->next;
}
} else if ((flags & kMuHasBlocked) &&
(s->priority >= head->next->priority) &&
(!head->maybe_unlocking ||
(waitp->how == kExclusive &&
Condition::GuaranteedEqual(waitp->cond, nullptr)))) {
s->next = head->next;
head->next = s;
if (MuEquivalentWaiter(s, s->next)) {
s->skip = s->next;
}
} else {
s->next = head->next;
head->next = s;
s->readers = head->readers;
s->maybe_unlocking = head->maybe_unlocking;
if (head->may_skip && MuEquivalentWaiter(head, s)) {
head->skip = s;
}
head = s;
}
}
s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
return head;
}
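// Remove pw's successor from the waiter queue (head is the most-recent
// arrival), repairing skip pointers around the removed element; returns the
// new head.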
static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
PerThreadSynch* w = pw->next;
pw->next = w->next;
if (head == w) {
head = (pw == w) ? nullptr : pw;
} else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
    if (pw->next->skip != nullptr) {
pw->skip = pw->next->skip;
} else {
pw->skip = pw->next;
}
}
return head;
}
static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
PerThreadSynch* pw,
PerThreadSynch** wake_tail) {
PerThreadSynch* orig_h = head;
PerThreadSynch* w = pw->next;
bool skipped = false;
do {
if (w->wake) {
ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
head = Dequeue(head, pw);
w->next = *wake_tail;
*wake_tail = w;
wake_tail = &w->next;
if (w->waitp->how == kExclusive) {
break;
}
} else {
pw = Skip(w);
skipped = true;
}
w = pw->next;
} while (orig_h == head && (pw != head || !skipped));
return head;
}
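// Try to remove thread s from this mutex's waiter queue, e.g. after a timed
// wait expires; a failed attempt is harmless because Block() retries.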
void Mutex::TryRemove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
std::memory_order_acquire,
std::memory_order_relaxed)) {
PerThreadSynch* h = GetPerThreadSynch(v);
if (h != nullptr) {
PerThreadSynch* pw = h;
PerThreadSynch* w;
if ((w = pw->next) != s) {
do {
if (!MuEquivalentWaiter(s, w)) {
pw = Skip(w);
} else {
FixSkip(w, s);
pw = w;
}
} while ((w = pw->next) != s && pw != h);
}
if (w == s) {
h = Dequeue(h, pw);
s->next = nullptr;
s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
}
}
intptr_t nv;
do {
v = mu_.load(std::memory_order_relaxed);
nv = v & (kMuDesig | kMuEvent);
if (h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(h);
h->readers = 0;
h->maybe_unlocking = false;
}
} while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
}
}
void Mutex::Block(PerThreadSynch* s) {
while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
this->TryRemove(s);
int c = 0;
while (s->next != nullptr) {
c = synchronization_internal::MutexDelay(c, GENTLE);
this->TryRemove(s);
}
if (kDebugMode) {
this->TryRemove(s);
}
s->waitp->timeout = KernelTimeout::Never();
s->waitp->cond = nullptr;
}
}
ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
"detected illegal recursion in Mutex code");
s->waitp = nullptr;
}
PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
PerThreadSynch* next = w->next;
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
IncrementSynchSem(this, w);
return next;
}
static GraphId GetGraphIdLocked(Mutex* mu)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
if (!deadlock_graph) {
deadlock_graph =
new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
GraphCycles;
}
return deadlock_graph->GetId(mu);
}
static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
deadlock_graph_mu.Lock();
GraphId id = GetGraphIdLocked(mu);
deadlock_graph_mu.Unlock();
return id;
}
static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
i++;
}
if (i == n) {
if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
held_locks->overflow = true;
} else {
held_locks->locks[i].mu = mu;
held_locks->locks[i].count = 1;
held_locks->locks[i].id = id;
held_locks->n = n + 1;
}
} else {
held_locks->locks[i].count++;
}
}
static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
i++;
}
if (i == n) {
if (!held_locks->overflow) {
i = 0;
while (i != n && held_locks->locks[i].mu != mu) {
i++;
}
if (i == n) {
SynchEvent* mu_events = GetSynchEvent(mu);
ABSL_RAW_LOG(FATAL,
"thread releasing lock it does not hold: %p %s; "
,
static_cast<void*>(mu),
mu_events == nullptr ? "" : mu_events->name);
}
}
} else if (held_locks->locks[i].count == 1) {
held_locks->n = n - 1;
held_locks->locks[i] = held_locks->locks[n - 1];
held_locks->locks[n - 1].id = InvalidGraphId();
    held_locks->locks[n - 1].mu = nullptr;
} else {
assert(held_locks->locks[i].count > 0);
held_locks->locks[i].count--;
}
}
static inline void DebugOnlyLockEnter(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
}
}
}
static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
LockEnter(mu, id, Synch_GetAllLocks());
}
}
}
static inline void DebugOnlyLockLeave(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
}
}
}
static char* StackString(void** pcs, int n, char* buf, int maxlen,
bool symbolize) {
static constexpr int kSymLen = 200;
char sym[kSymLen];
int len = 0;
for (int i = 0; i != n; i++) {
if (len >= maxlen)
return buf;
size_t count = static_cast<size_t>(maxlen - len);
if (symbolize) {
if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
sym[0] = '\0';
}
snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
sym);
} else {
snprintf(buf + len, count, " %p", pcs[i]);
}
len += strlen(&buf[len]);
}
return buf;
}
static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
void* pcs[40];
return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
maxlen, symbolize);
}
namespace {
enum {
kMaxDeadlockPathLen = 10
};
struct DeadlockReportBuffers {
char buf[6100];
GraphId path[kMaxDeadlockPathLen];
};
struct ScopedDeadlockReportBuffers {
ScopedDeadlockReportBuffers() {
b = reinterpret_cast<DeadlockReportBuffers*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
}
~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
DeadlockReportBuffers* b;
};
int GetStack(void** stack, int max_depth) {
return absl::GetStackTrace(stack, max_depth, 3);
}
}
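// Record, in the global lock-order graph, an edge from every lock this
// thread already holds to mu, and report (optionally aborting on) any cycle,
// which indicates a potential deadlock. Returns mu's graph id.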
static GraphId DeadlockCheck(Mutex* mu) {
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kIgnore) {
return InvalidGraphId();
}
SynchLocksHeld* all_locks = Synch_GetAllLocks();
absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
const GraphId mu_id = GetGraphIdLocked(mu);
if (all_locks->n == 0) {
return mu_id;
}
deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
for (int i = 0; i != all_locks->n; i++) {
const GraphId other_node_id = all_locks->locks[i].id;
const Mutex* other =
static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
if (other == nullptr) {
continue;
}
if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
ScopedDeadlockReportBuffers scoped_buffers;
DeadlockReportBuffers* b = scoped_buffers.b;
static int number_of_reported_deadlocks = 0;
number_of_reported_deadlocks++;
bool symbolize = number_of_reported_deadlocks <= 2;
ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
                 CurrentStackString(b->buf, sizeof(b->buf), symbolize));
size_t len = 0;
for (int j = 0; j != all_locks->n; j++) {
void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
if (pr != nullptr) {
snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
len += strlen(&b->buf[len]);
}
}
ABSL_RAW_LOG(ERROR,
"Acquiring absl::Mutex %p while holding %s; a cycle in the "
"historical lock ordering graph has been observed",
static_cast<void*>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
ABSL_ARRAYSIZE(b->path), b->path);
for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
GraphId id = b->path[j];
Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
if (path_mu == nullptr) continue;
void** stack;
int depth = deadlock_graph->GetStackTrace(id, &stack);
snprintf(b->buf, sizeof(b->buf),
"mutex@%p stack: ", static_cast<void*>(path_mu));
StackString(stack, depth, b->buf + strlen(b->buf),
static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
symbolize);
ABSL_RAW_LOG(ERROR, "%s", b->buf);
}
if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
}
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kAbort) {
deadlock_graph_mu.Unlock();
ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
return mu_id;
}
break;
}
}
return mu_id;
}
static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
return DeadlockCheck(mu);
} else {
return InvalidGraphId();
}
}
void Mutex::ForgetDeadlockInfo() {
if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
deadlock_graph_mu.Lock();
if (deadlock_graph != nullptr) {
deadlock_graph->RemoveNode(this);
}
deadlock_graph_mu.Unlock();
}
}
void Mutex::AssertNotHeld() const {
if (kDebugMode &&
(mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
GraphId id = GetGraphId(const_cast<Mutex*>(this));
SynchLocksHeld* locks = Synch_GetAllLocks();
for (int i = 0; i != locks->n; i++) {
if (locks->locks[i].id == id) {
SynchEvent* mu_events = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
static_cast<const void*>(this),
(mu_events == nullptr ? "" : mu_events->name));
}
}
}
}
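// Try to acquire *mu exclusively by spinning a bounded number of times on a
// compare-and-swap of the writer bit; bails out immediately if readers hold
// the lock or debug events are enabled.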
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
int c = globals.spinloop_iterations.load(std::memory_order_relaxed);
do {
intptr_t v = mu->load(std::memory_order_relaxed);
if ((v & (kMuReader | kMuEvent)) != 0) {
return false;
} else if (((v & kMuWriter) == 0) &&
mu->compare_exchange_strong(v, kMuWriter | v,
std::memory_order_acquire,
std::memory_order_relaxed)) {
return true;
}
} while (--c > 0);
return false;
}
void Mutex::Lock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
if (ABSL_PREDICT_FALSE((v & (kMuWriter | kMuReader | kMuEvent)) != 0) ||
ABSL_PREDICT_FALSE(!mu_.compare_exchange_strong(
v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed))) {
if (ABSL_PREDICT_FALSE(!TryAcquireWithSpinning(&this->mu_))) {
this->LockSlow(kExclusive, nullptr, 0);
}
}
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
void Mutex::ReaderLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
for (;;) {
if (ABSL_PREDICT_FALSE(v & (kMuWriter | kMuWait | kMuEvent)) != 0) {
this->LockSlow(kShared, nullptr, 0);
break;
}
if (ABSL_PREDICT_TRUE(mu_.compare_exchange_weak(
v, (kMuReader | v) + kMuOne, std::memory_order_acquire,
std::memory_order_relaxed))) {
break;
}
}
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
bool Mutex::LockWhenCommon(const Condition& cond,
synchronization_internal::KernelTimeout t,
bool write) {
MuHow how = write ? kExclusive : kShared;
ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
GraphId id = DebugOnlyDeadlockCheck(this);
bool res = LockSlowWithDeadline(how, &cond, t, 0);
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
return res;
}
bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
if (kDebugMode) {
this->AssertReaderHeld();
}
if (cond.Eval()) {
return true;
}
MuHow how =
(mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
  SynchWaitParams waitp(how, &cond, t, nullptr /*cvmu*/,
                        Synch_GetPerThreadAnnotated(this),
                        nullptr /*cv_word*/);
this->UnlockSlow(&waitp);
this->Block(waitp.thread);
ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
this->LockSlowLoop(&waitp, kMuHasBlocked | kMuIsCond);
bool res = waitp.cond != nullptr ||
EvalConditionAnnotated(&cond, this, true, false, how == kShared);
ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
ABSL_RAW_CHECK(res || t.has_timeout(),
"condition untrue on return from Await");
return res;
}
bool Mutex::TryLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
if (ABSL_PREDICT_TRUE((v & (kMuWriter | kMuReader | kMuEvent)) == 0)) {
if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(
v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed))) {
DebugOnlyLockEnter(this);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
return true;
}
} else if (ABSL_PREDICT_FALSE((v & kMuEvent) != 0)) {
return TryLockSlow();
}
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
return false;
}
ABSL_ATTRIBUTE_NOINLINE bool Mutex::TryLockSlow() {
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kExclusive->slow_need_zero) == 0 &&
mu_.compare_exchange_strong(
v, (kExclusive->fast_or | v) + kExclusive->fast_add,
std::memory_order_acquire, std::memory_order_relaxed)) {
DebugOnlyLockEnter(this);
PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
return true;
}
PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
return false;
}
bool Mutex::ReaderTryLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this,
__tsan_mutex_read_lock | __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
#if defined(__clang__)
#pragma nounroll
#endif
for (int loop_limit = 5; loop_limit != 0; loop_limit--) {
if (ABSL_PREDICT_FALSE((v & (kMuWriter | kMuWait | kMuEvent)) != 0)) {
break;
}
if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(
v, (kMuReader | v) + kMuOne, std::memory_order_acquire,
std::memory_order_relaxed))) {
DebugOnlyLockEnter(this);
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
return true;
}
}
if (ABSL_PREDICT_TRUE((v & kMuEvent) == 0)) {
ABSL_TSAN_MUTEX_POST_LOCK(this,
__tsan_mutex_read_lock | __tsan_mutex_try_lock |
__tsan_mutex_try_lock_failed,
0);
return false;
}
return ReaderTryLockSlow();
}
ABSL_ATTRIBUTE_NOINLINE bool Mutex::ReaderTryLockSlow() {
intptr_t v = mu_.load(std::memory_order_relaxed);
#if defined(__clang__)
#pragma nounroll
#endif
for (int loop_limit = 5; loop_limit != 0; loop_limit--) {
if ((v & kShared->slow_need_zero) == 0 &&
mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
std::memory_order_acquire,
std::memory_order_relaxed)) {
DebugOnlyLockEnter(this);
PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
return true;
}
}
PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
ABSL_TSAN_MUTEX_POST_LOCK(this,
__tsan_mutex_read_lock | __tsan_mutex_try_lock |
__tsan_mutex_try_lock_failed,
0);
return false;
}
void Mutex::Unlock() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
static_cast<unsigned>(v));
}
bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
(v & (kMuWait | kMuDesig)) != kMuWait);
static_assert(kMuEvent > kMuWait, "Needed for should_try_cas_fast");
static_assert(kMuEvent > kMuDesig, "Needed for should_try_cas_fast");
static_assert(kMuWriter > kMuWait, "Needed for should_try_cas_fast");
static_assert(kMuWriter > kMuDesig, "Needed for should_try_cas_fast");
bool should_try_cas_fast =
((v ^ (kMuWriter | kMuDesig)) &
(kMuEvent | kMuWriter | kMuWait | kMuDesig)) < (kMuWait | kMuDesig);
if (kDebugMode && should_try_cas != should_try_cas_fast) {
ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
static_cast<long long>(v),
static_cast<long long>(should_try_cas),
static_cast<long long>(should_try_cas_fast));
}
if (should_try_cas_fast &&
mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
std::memory_order_release,
std::memory_order_relaxed)) {
} else {
    this->UnlockSlow(nullptr /*no waitp*/);
}
ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
}
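// True iff the high bits of the lock word record exactly one reader (a
// single kMuOne increment) with no queued waiters.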
static bool ExactlyOneReader(intptr_t v) {
assert((v & (kMuWriter | kMuReader)) == kMuReader);
assert((v & kMuHigh) != 0);
constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
return (v & kMuMultipleWaitersMask) == 0;
}
void Mutex::ReaderUnlock() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
assert((v & (kMuWriter | kMuReader)) == kMuReader);
for (;;) {
if (ABSL_PREDICT_FALSE((v & (kMuReader | kMuWait | kMuEvent)) !=
kMuReader)) {
      this->UnlockSlow(nullptr /*no waitp*/);
break;
}
intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
if (ABSL_PREDICT_TRUE(
mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed))) {
break;
}
}
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
}
static intptr_t ClearDesignatedWakerMask(int flag) {
assert(flag >= 0);
assert(flag <= 1);
switch (flag) {
case 0:
return ~static_cast<intptr_t>(0);
case 1:
return ~static_cast<intptr_t>(kMuDesig);
}
ABSL_UNREACHABLE();
}
static intptr_t IgnoreWaitingWritersMask(int flag) {
assert(flag >= 0);
assert(flag <= 1);
switch (flag) {
case 0:
return ~static_cast<intptr_t>(0);
case 1:
return ~static_cast<intptr_t>(kMuWrWait);
}
ABSL_UNREACHABLE();
}
ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
int flags) {
if (ABSL_PREDICT_FALSE(
globals.spinloop_iterations.load(std::memory_order_relaxed) == 0)) {
if (absl::base_internal::NumCPUs() > 1) {
globals.spinloop_iterations.store(1500, std::memory_order_relaxed);
} else {
globals.spinloop_iterations.store(-1, std::memory_order_relaxed);
}
}
ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
"condition untrue on return from LockSlow");
}
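// Evaluate cond->Eval(), emitting thread-sanitizer annotations arranged so
// the race detector models the mutex as held during the evaluation on both
// the lock and unlock paths.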
static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock) {
bool res = false;
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;
const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
#endif
if (locking) {
ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
res = cond->Eval();
ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
} else {
ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
res = cond->Eval();
ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
}
static_cast<void>(mu);
static_cast<void>(trylock);
static_cast<void>(read_lock);
return res;
}
static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
bool res = cond->Eval();
ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
static_cast<void>(mu);
return res;
}
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
KernelTimeout t, int flags) {
intptr_t v = mu_.load(std::memory_order_relaxed);
bool unlock = false;
if ((v & how->fast_need_zero) == 0 &&
mu_.compare_exchange_strong(
v,
(how->fast_or |
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
how->fast_add,
std::memory_order_acquire, std::memory_order_relaxed)) {
if (cond == nullptr ||
EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
return true;
}
unlock = true;
}
  SynchWaitParams waitp(how, cond, t, nullptr /*cvmu*/,
                        Synch_GetPerThreadAnnotated(this),
                        nullptr /*cv_word*/);
if (cond != nullptr) {
flags |= kMuIsCond;
}
if (unlock) {
this->UnlockSlow(&waitp);
this->Block(waitp.thread);
flags |= kMuHasBlocked;
}
this->LockSlowLoop(&waitp, flags);
return waitp.cond != nullptr ||
cond == nullptr ||
EvalConditionAnnotated(cond, this, true, false, how == kShared);
}
#define RAW_CHECK_FMT(cond, ...) \
do { \
if (ABSL_PREDICT_FALSE(!(cond))) { \
ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
} \
} while (0)
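// Cheap sanity check of the lock word: dies if both the reader and writer
// bits are set, or if a writer is recorded as waiting (kMuWrWait) while the
// waiter bit (kMuWait) is clear.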
static void CheckForMutexCorruption(intptr_t v, const char* label) {
const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
static_assert(kMuReader << 3 == kMuWriter, "must match");
static_assert(kMuWait << 3 == kMuWrWait, "must match");
if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
"%s: Mutex corrupt: both reader and writer lock held: %p",
label, reinterpret_cast<void*>(v));
RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
"%s: Mutex corrupt: waiting writer with no waiters: %p", label,
reinterpret_cast<void*>(v));
assert(false);
}
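// Slow path of acquisition: alternately attempt the lock, enqueue on the
// waiter list, and block on the per-thread semaphore, until the lock is held
// in the requested mode with the condition (if any) true.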
void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
PostSynchEvent(
this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
for (;;) {
v = mu_.load(std::memory_order_relaxed);
CheckForMutexCorruption(v, "Lock");
if ((v & waitp->how->slow_need_zero) == 0) {
if (mu_.compare_exchange_strong(
v,
(waitp->how->fast_or |
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
waitp->how->fast_add,
std::memory_order_acquire, std::memory_order_relaxed)) {
if (waitp->cond == nullptr ||
EvalConditionAnnotated(waitp->cond, this, true, false,
waitp->how == kShared)) {
break;
}
this->UnlockSlow(waitp);
this->Block(waitp->thread);
flags |= kMuHasBlocked;
c = 0;
}
} else {
bool dowait = false;
if ((v & (kMuSpin | kMuWait)) == 0) {
PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
intptr_t nv =
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
kMuWait;
ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) {
nv |= kMuWrWait;
}
if (mu_.compare_exchange_strong(
v, reinterpret_cast<intptr_t>(new_h) | nv,
std::memory_order_release, std::memory_order_relaxed)) {
dowait = true;
} else {
waitp->thread->waitp = nullptr;
}
} else if ((v & waitp->how->slow_inc_need_zero &
IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
if (mu_.compare_exchange_strong(
v,
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuReader,
std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch* h = GetPerThreadSynch(v);
h->readers += kMuOne;
do {
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
std::memory_order_release,
std::memory_order_relaxed));
if (waitp->cond == nullptr ||
EvalConditionAnnotated(waitp->cond, this, true, false,
waitp->how == kShared)) {
break;
}
this->UnlockSlow(waitp);
this->Block(waitp->thread);
flags |= kMuHasBlocked;
c = 0;
}
} else if ((v & kMuSpin) == 0 &&
mu_.compare_exchange_strong(
v,
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuWait,
std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch* h = GetPerThreadSynch(v);
PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
intptr_t wr_wait = 0;
ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) {
wr_wait = kMuWrWait;
}
do {
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(
v,
(v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
reinterpret_cast<intptr_t>(new_h),
std::memory_order_release, std::memory_order_relaxed));
dowait = true;
}
if (dowait) {
this->Block(waitp->thread);
flags |= kMuHasBlocked;
c = 0;
}
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
c = synchronization_internal::MutexDelay(c, GENTLE);
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
if ((v & kMuEvent) != 0) {
PostSynchEvent(this, waitp->how == kExclusive
? SYNCH_EV_LOCK_RETURNING
: SYNCH_EV_READERLOCK_RETURNING);
}
}
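// Slow path of release: drop the lock and/or wake waiters whose conditions
// hold; when waitp != nullptr the calling thread is atomically re-queued on
// the mutex as part of the release (as in Await).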
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
if ((v & kMuEvent) != 0) {
PostSynchEvent(
this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
}
int c = 0;
PerThreadSynch* w = nullptr;
PerThreadSynch* pw = nullptr;
PerThreadSynch* old_h = nullptr;
PerThreadSynch* wake_list = kPerThreadSynchNull;
intptr_t wr_wait = 0;
ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
for (;;) {
v = mu_.load(std::memory_order_relaxed);
if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
waitp == nullptr) {
if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
std::memory_order_release,
std::memory_order_relaxed)) {
return;
}
} else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed)) {
return;
}
} else if ((v & kMuSpin) == 0 &&
mu_.compare_exchange_strong(v, v | kMuSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
if ((v & kMuWait) == 0) {
intptr_t nv;
bool do_enqueue = true;
        ABSL_RAW_CHECK(waitp != nullptr, "UnlockSlow is confused");
do {
v = mu_.load(std::memory_order_relaxed);
intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
PerThreadSynch* new_h = nullptr;
if (do_enqueue) {
do_enqueue = (waitp->cv_word == nullptr);
new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
}
intptr_t clear = kMuWrWait | kMuWriter;
if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) {
clear = kMuWrWait | kMuReader;
}
nv = (v & kMuLow & ~clear & ~kMuSpin);
if (new_h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
} else {
nv |= new_readers & kMuHigh;
}
} while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
break;
}
PerThreadSynch* h = GetPerThreadSynch(v);
if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
h->readers -= kMuOne;
intptr_t nv = v;
if (waitp != nullptr) {
PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr,
"waiters disappeared during Enqueue()!");
nv &= kMuLow;
nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
}
mu_.store(nv, std::memory_order_release);
break;
}
ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
"Mutex queue changed beneath us");
      if (old_h != nullptr && !old_h->may_skip) {
old_h->may_skip = true;
ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next;
}
}
if (h->next->waitp->how == kExclusive &&
h->next->waitp->cond == nullptr) {
pw = h;
w = h->next;
w->wake = true;
wr_wait = kMuWrWait;
} else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
if (pw == nullptr) {
pw = h;
}
} else {
if (old_h == h) {
intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
h->readers = 0;
h->maybe_unlocking = false;
if (waitp != nullptr) {
PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
nv &= kMuLow;
if (new_h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
}
}
mu_.store(nv, std::memory_order_release);
break;
}
PerThreadSynch* w_walk;
PerThreadSynch* pw_walk;
if (old_h != nullptr) {
pw_walk = old_h;
w_walk = old_h->next;
} else {
          pw_walk = nullptr;
w_walk = h->next;
}
h->may_skip = false;
ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
h->maybe_unlocking = true;
mu_.store(v, std::memory_order_release);
old_h = h;
while (pw_walk != h) {
w_walk->wake = false;
          if (w_walk->waitp->cond == nullptr ||
              EvalConditionIgnored(this, w_walk->waitp->cond)) {
if (w == nullptr) {
w_walk->wake = true;
w = w_walk;
pw = pw_walk;
if (w_walk->waitp->how == kExclusive) {
wr_wait = kMuWrWait;
break;
}
} else if (w_walk->waitp->how == kShared) {
w_walk->wake = true;
} else {
wr_wait = kMuWrWait;
}
}
if (w_walk->wake) {
pw_walk = w_walk;
} else {
pw_walk = Skip(w_walk);
}
if (pw_walk != h) {
w_walk = pw_walk->next;
}
}
continue;
}
ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
h = DequeueAllWakeable(h, pw, &wake_list);
intptr_t nv = (v & kMuEvent) | kMuDesig;
if (waitp != nullptr) {
h = Enqueue(h, waitp, v, kMuIsCond);
}
ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
"unexpected empty wake list");
if (h != nullptr) {
h->readers = 0;
h->maybe_unlocking = false;
nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
}
mu_.store(nv, std::memory_order_release);
break;
}
c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
}
if (wake_list != kPerThreadSynchNull) {
int64_t total_wait_cycles = 0;
int64_t max_wait_cycles = 0;
int64_t now = CycleClock::Now();
do {
if (!wake_list->cond_waiter) {
int64_t cycles_waited =
(now - wake_list->waitp->contention_start_cycles);
total_wait_cycles += cycles_waited;
if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
wake_list->waitp->contention_start_cycles = now;
wake_list->waitp->should_submit_contention_data = true;
}
wake_list = Wakeup(wake_list);
} while (wake_list != kPerThreadSynchNull);
if (total_wait_cycles > 0) {
mutex_tracer("slow release", this, total_wait_cycles);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(total_wait_cycles);
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
}
}
}
void Mutex::Trans(MuHow how) {
this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
}
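// Fer() ("transfer") is used by CondVar::Signal()/SignalAll() to hand a
// waiter w, taken from a condition variable's queue, to this mutex.  The
// waiter's timeout is cleared first: once a signal has committed to this
// thread, the handoff must not be abandoned.  If the mutex is not held in a
// conflicting mode, w is made runnable immediately; otherwise it is enqueued
// as a mutex waiter (kMuIsFer) and will acquire the lock on a later unlock.
// Note that the unadorned compare_exchange_strong below defaults to
// sequentially consistent ordering.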
void Mutex::Fer(PerThreadSynch* w) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
"Mutex::Fer while waiting on Condition");
ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
"Mutex::Fer with pending CondVar queueing");
w->waitp->timeout = {};
for (;;) {
intptr_t v = mu_.load(std::memory_order_relaxed);
const intptr_t conflicting =
kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
if ((v & conflicting) == 0) {
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
IncrementSynchSem(this, w);
return;
} else {
if ((v & (kMuSpin | kMuWait)) == 0) {
PerThreadSynch* new_h =
Enqueue(nullptr, w->waitp, v, kMuIsCond | kMuIsFer);
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue failed");
if (mu_.compare_exchange_strong(
v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
std::memory_order_release, std::memory_order_relaxed)) {
return;
}
} else if ((v & kMuSpin) == 0 &&
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
PerThreadSynch* h = GetPerThreadSynch(v);
PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond | kMuIsFer);
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue failed");
do {
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(
v,
(v & kMuLow & ~kMuSpin) | kMuWait |
reinterpret_cast<intptr_t>(new_h),
std::memory_order_release, std::memory_order_relaxed));
return;
}
}
c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
void Mutex::AssertHeld() const {
if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
SynchEvent* e = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
void Mutex::AssertReaderHeld() const {
if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
SynchEvent* e = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL,
"thread should hold at least a read lock on Mutex %p %s",
static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
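// CondVar representation: cv_ packs a spinlock bit (kCvSpin) and a
// debug-event bit (kCvEvent) into the low bits of a pointer to the waiter
// list.  The list is circular and singly linked; the stored pointer
// designates the most recently enqueued waiter, whose next link is the
// oldest, so Signal() wakes in FIFO order.  The kGdbCv* enum below simply
// mirrors the constants so they remain visible to debuggers.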
static const intptr_t kCvSpin = 0x0001L;
static const intptr_t kCvEvent = 0x0002L;
static const intptr_t kCvLow = 0x0003L;
enum {
kGdbCvSpin = kCvSpin,
kGdbCvEvent = kCvEvent,
kGdbCvLow = kCvLow,
};
static_assert(PerThreadSynch::kAlignment > kCvLow,
"PerThreadSynch::kAlignment must be greater than kCvLow");
void CondVar::EnableDebugLog(const char* name) {
SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
e->log = true;
UnrefSynchEvent(e);
}
void CondVar::Remove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 &&
cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
PerThreadSynch* w = h;
while (w->next != s && w->next != h) {
w = w->next;
}
if (w->next == s) {
w->next = s->next;
if (h == s) {
h = (w == s) ? nullptr : w;
}
s->next = nullptr;
s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
}
}
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
return;
} else {
c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
}
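// Queues the calling thread on the condition variable named by
// waitp->cv_word.  This is reached from Mutex::Enqueue() when cv_word is
// set, i.e. while the associated mutex is being released on behalf of a
// CondVar::Wait*() call -- which is what makes "unlock, then wait" atomic
// with respect to Signal().  The cv spinlock is taken with a CAS loop, with
// MutexDelay() providing backoff under contention.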
static void CondVarEnqueue(SynchWaitParams* waitp) {
std::atomic<intptr_t>* cv_word = waitp->cv_word;
waitp->cv_word = nullptr;
intptr_t v = cv_word->load(std::memory_order_relaxed);
int c = 0;
while ((v & kCvSpin) != 0 ||
!cv_word->compare_exchange_weak(v, v | kCvSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
c = synchronization_internal::MutexDelay(c, GENTLE);
v = cv_word->load(std::memory_order_relaxed);
}
ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
waitp->thread->waitp = waitp;
PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h == nullptr) {
waitp->thread->next = waitp->thread;
} else {
waitp->thread->next = h->next;
h->next = waitp->thread;
}
waitp->thread->state.store(PerThreadSynch::kQueued,
std::memory_order_relaxed);
cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
std::memory_order_release);
}
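// Common implementation of CondVar::Wait/WaitWithTimeout/WaitWithDeadline.
// It records whether `mutex` is held exclusively or shared, releases it via
// UnlockSlow() with cv_word pointing at cv_ (so the caller is enqueued on
// the condition variable atomically with the release), blocks on the
// per-thread semaphore until signalled or until `t` expires, and then
// reacquires the mutex in the original mode via Trans().  Returns true iff
// the timeout expired.
//
// Typical caller pattern (illustrative sketch, not code from this file):
//   mu.Lock();
//   while (!ready) {
//     cv.Wait(&mu);    // lands here with t == KernelTimeout::Never()
//   }
//   ...                // `ready` now holds and mu is held again
//   mu.Unlock();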
bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
bool rc = false;
intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
intptr_t v = cv_.load(std::memory_order_relaxed);
cond_var_tracer("Wait", this);
if ((v & kCvEvent) != 0) {
PostSynchEvent(this, SYNCH_EV_WAIT);
}
SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
Synch_GetPerThreadAnnotated(mutex), &cv_);
mutex->UnlockSlow(&waitp);
while (waitp.thread->state.load(std::memory_order_acquire) ==
PerThreadSynch::kQueued) {
if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
t = KernelTimeout::Never();
this->Remove(waitp.thread);
rc = true;
}
}
ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
waitp.thread->waitp = nullptr;
cond_var_tracer("Unwait", this);
if ((v & kCvEvent) != 0) {
PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
}
ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
mutex->Trans(mutex_how);
ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
return rc;
}
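// Wakes one waiter, if any.  The oldest waiter (h->next) is unlinked under
// the cv spinlock and handed to its mutex via Fer(), so it contends for the
// lock directly instead of waking only to block on the mutex again.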
void CondVar::Signal() {
SchedulingGuard::ScopedDisable disable_rescheduling;
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed); v != 0;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 &&
cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
PerThreadSynch* w = nullptr;
if (h != nullptr) {
w = h->next;
if (w == h) {
h = nullptr;
} else {
h->next = w->next;
}
}
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
if (w != nullptr) {
w->waitp->cvmu->Fer(w);
cond_var_tracer("Signal wakeup", this);
}
if ((v & kCvEvent) != 0) {
PostSynchEvent(this, SYNCH_EV_SIGNAL);
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}
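// Wakes all waiters.  Unlike Signal(), no spinlock is needed here: a single
// CAS detaches the entire waiter list (leaving only the kCvEvent bit), after
// which each detached waiter is transferred to its mutex with Fer().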
void CondVar::SignalAll() {
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed); v != 0;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 &&
cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
std::memory_order_relaxed)) {
PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
PerThreadSynch* w;
PerThreadSynch* n = h->next;
do {
w = n;
n = n->next;
w->waitp->cvmu->Fer(w);
} while (w != h);
cond_var_tracer("SignalAll wakeup", this);
}
if ((v & kCvEvent) != 0) {
PostSynchEvent(this, SYNCH_EV_SIGNALALL);
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}
void ReleasableMutexLock::Release() {
ABSL_RAW_CHECK(this->mu_ != nullptr,
"ReleasableMutexLock::Release may only be called once");
this->mu_->Unlock();
this->mu_ = nullptr;
}
#ifdef ABSL_HAVE_THREAD_SANITIZER
extern "C" void __tsan_read1(void* addr);
#else
#define __tsan_read1(addr)
#endif
static bool Dereference(void* arg) {
__tsan_read1(arg);
return *(static_cast<bool*>(arg));
}
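// Condition stores its callable as raw bytes in callback_ (StoreCallback is
// a memcpy), and CallVoidPtrFunction() memcpys the bytes back out before
// invoking.  Keeping the representation trivially comparable is what lets
// GuaranteedEqual() decide bytewise that two Conditions are interchangeable
// without evaluating them.  Dereference() above adapts a `bool*` condition
// to the function-pointer form; the __tsan_read1() call informs
// ThreadSanitizer of the deliberately unsynchronized read of the flag.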
ABSL_CONST_INIT const Condition Condition::kTrue;
Condition::Condition(bool (*func)(void*), void* arg)
: eval_(&CallVoidPtrFunction), arg_(arg) {
static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer passed to Condition.");
StoreCallback(func);
}
bool Condition::CallVoidPtrFunction(const Condition* c) {
using FunctionPointer = bool (*)(void*);
FunctionPointer function_pointer;
std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
return (*function_pointer)(c->arg_);
}
Condition::Condition(const bool* cond)
: eval_(CallVoidPtrFunction),
arg_(const_cast<bool*>(cond)) {
using FunctionPointer = bool (*)(void*);
const FunctionPointer dereference = Dereference;
StoreCallback(dereference);
}
bool Condition::Eval() const { return (*this->eval_)(this); }
bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
if (a == nullptr || b == nullptr) {
return a == b;
}
return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
!memcmp(a->callback_, b->callback_, sizeof(a->callback_));
}
ABSL_NAMESPACE_END
} | #include "absl/synchronization/mutex.h"
#ifdef _WIN32
#include <windows.h>
#endif
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <functional>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <type_traits>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/synchronization/internal/create_thread_identity.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
#include <pthread.h>
#include <string.h>
#endif
namespace {
static constexpr bool kExtendedTest = false;
std::unique_ptr<absl::synchronization_internal::ThreadPool> CreatePool(
int threads) {
return absl::make_unique<absl::synchronization_internal::ThreadPool>(threads);
}
std::unique_ptr<absl::synchronization_internal::ThreadPool>
CreateDefaultPool() {
return CreatePool(kExtendedTest ? 32 : 10);
}
static void ScheduleAfter(absl::synchronization_internal::ThreadPool *tp,
absl::Duration after,
const std::function<void()> &func) {
tp->Schedule([func, after] {
absl::SleepFor(after);
func();
});
}
struct ScopedInvariantDebugging {
ScopedInvariantDebugging() { absl::EnableMutexInvariantDebugging(true); }
~ScopedInvariantDebugging() { absl::EnableMutexInvariantDebugging(false); }
};
struct TestContext {
int iterations;
int threads;
int g0;
int g1;
absl::Mutex mu;
absl::CondVar cv;
};
static std::atomic<bool> invariant_checked;
static bool GetInvariantChecked() {
return invariant_checked.load(std::memory_order_relaxed);
}
static void SetInvariantChecked(bool new_value) {
invariant_checked.store(new_value, std::memory_order_relaxed);
}
static void CheckSumG0G1(void *v) {
TestContext *cxt = static_cast<TestContext *>(v);
CHECK_EQ(cxt->g0, -cxt->g1) << "Error in CheckSumG0G1";
SetInvariantChecked(true);
}
static void TestMu(TestContext *cxt, int c) {
for (int i = 0; i != cxt->iterations; i++) {
absl::MutexLock l(&cxt->mu);
int a = cxt->g0 + 1;
cxt->g0 = a;
cxt->g1--;
}
}
static void TestTry(TestContext *cxt, int c) {
for (int i = 0; i != cxt->iterations; i++) {
do {
std::this_thread::yield();
} while (!cxt->mu.TryLock());
int a = cxt->g0 + 1;
cxt->g0 = a;
cxt->g1--;
cxt->mu.Unlock();
}
}
static void TestR20ms(TestContext *cxt, int c) {
for (int i = 0; i != cxt->iterations; i++) {
absl::ReaderMutexLock l(&cxt->mu);
absl::SleepFor(absl::Milliseconds(20));
cxt->mu.AssertReaderHeld();
}
}
static void TestRW(TestContext *cxt, int c) {
if ((c & 1) == 0) {
for (int i = 0; i != cxt->iterations; i++) {
absl::WriterMutexLock l(&cxt->mu);
cxt->g0++;
cxt->g1--;
cxt->mu.AssertHeld();
cxt->mu.AssertReaderHeld();
}
} else {
for (int i = 0; i != cxt->iterations; i++) {
absl::ReaderMutexLock l(&cxt->mu);
CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW";
cxt->mu.AssertReaderHeld();
}
}
}
struct MyContext {
int target;
TestContext *cxt;
bool MyTurn();
};
bool MyContext::MyTurn() {
TestContext *cxt = this->cxt;
return cxt->g0 == this->target || cxt->g0 == cxt->iterations;
}
static void TestAwait(TestContext *cxt, int c) {
MyContext mc;
mc.target = c;
mc.cxt = cxt;
absl::MutexLock l(&cxt->mu);
cxt->mu.AssertHeld();
while (cxt->g0 < cxt->iterations) {
cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
CHECK(mc.MyTurn()) << "Error in TestAwait";
cxt->mu.AssertHeld();
if (cxt->g0 < cxt->iterations) {
int a = cxt->g0 + 1;
cxt->g0 = a;
mc.target += cxt->threads;
}
}
}
static void TestSignalAll(TestContext *cxt, int c) {
int target = c;
absl::MutexLock l(&cxt->mu);
cxt->mu.AssertHeld();
while (cxt->g0 < cxt->iterations) {
while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
cxt->cv.Wait(&cxt->mu);
}
if (cxt->g0 < cxt->iterations) {
int a = cxt->g0 + 1;
cxt->g0 = a;
cxt->cv.SignalAll();
target += cxt->threads;
}
}
}
static void TestSignal(TestContext *cxt, int c) {
CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads";
int target = c;
absl::MutexLock l(&cxt->mu);
cxt->mu.AssertHeld();
while (cxt->g0 < cxt->iterations) {
while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
cxt->cv.Wait(&cxt->mu);
}
if (cxt->g0 < cxt->iterations) {
int a = cxt->g0 + 1;
cxt->g0 = a;
cxt->cv.Signal();
target += cxt->threads;
}
}
}
static void TestCVTimeout(TestContext *cxt, int c) {
int target = c;
absl::MutexLock l(&cxt->mu);
cxt->mu.AssertHeld();
while (cxt->g0 < cxt->iterations) {
while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
}
if (cxt->g0 < cxt->iterations) {
int a = cxt->g0 + 1;
cxt->g0 = a;
cxt->cv.SignalAll();
target += cxt->threads;
}
}
}
static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
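// TestTime choreographs timed waits across cxt->threads threads.  Thread 0
// runs a fixed sequence of 1s/4s waits and checks, via elapsed-time windows,
// that each wait neither returns early nor grossly overshoots; thread 1 does
// a 500ms wait, thread 2 waits (bounded) for g0 >= 2, and the remaining
// threads wait unbounded.  The generous upper bounds tolerate scheduling
// delay on loaded machines.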
static void TestTime(TestContext *cxt, int c, bool use_cv) {
CHECK_EQ(cxt->iterations, 1) << "TestTime should only use 1 iteration";
CHECK_GT(cxt->threads, 2) << "TestTime should use more than 2 threads";
const bool kFalse = false;
absl::Condition false_cond(&kFalse);
absl::Condition g0ge2(G0GE2, cxt);
if (c == 0) {
absl::MutexLock l(&cxt->mu);
absl::Time start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
<< "TestTime failed";
}
absl::Duration elapsed = absl::Now() - start;
CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
<< "TestTime failed";
CHECK_EQ(cxt->g0, 1) << "TestTime failed";
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
<< "TestTime failed";
}
elapsed = absl::Now() - start;
CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
<< "TestTime failed";
cxt->g0++;
if (use_cv) {
cxt->cv.Signal();
}
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
} else {
CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)))
<< "TestTime failed";
}
elapsed = absl::Now() - start;
CHECK(absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0))
<< "TestTime failed";
CHECK_GE(cxt->g0, 3) << "TestTime failed";
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
<< "TestTime failed";
}
elapsed = absl::Now() - start;
CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
<< "TestTime failed";
if (use_cv) {
cxt->cv.SignalAll();
}
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
<< "TestTime failed";
}
elapsed = absl::Now() - start;
CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
<< "TestTime failed";
CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed";
} else if (c == 1) {
absl::MutexLock l(&cxt->mu);
const absl::Time start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
} else {
CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)))
<< "TestTime failed";
}
const absl::Duration elapsed = absl::Now() - start;
CHECK(absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9))
<< "TestTime failed";
cxt->g0++;
} else if (c == 2) {
absl::MutexLock l(&cxt->mu);
if (use_cv) {
while (cxt->g0 < 2) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
}
} else {
CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)))
<< "TestTime failed";
}
cxt->g0++;
} else {
absl::MutexLock l(&cxt->mu);
if (use_cv) {
while (cxt->g0 < 2) {
cxt->cv.Wait(&cxt->mu);
}
} else {
cxt->mu.Await(g0ge2);
}
cxt->g0++;
}
}
static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); }
static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
const std::function<void(int)> &cb) {
mu->Lock();
int c = (*c0)++;
mu->Unlock();
cb(c);
absl::MutexLock l(mu);
(*c1)++;
cv->Signal();
}
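// Test harness: runs test(cxt, i) on `threads` pool threads and blocks until
// all of them have finished, using c1/cv2 as a completion barrier (EndTest
// increments c1 and signals).  Returns the final cxt->g0.  The `operations`
// parameter is accepted for signature uniformity but is unused here.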
static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
int threads, int iterations, int operations) {
absl::Mutex mu2;
absl::CondVar cv2;
int c0 = 0;
int c1 = 0;
cxt->g0 = 0;
cxt->g1 = 0;
cxt->iterations = iterations;
cxt->threads = threads;
absl::synchronization_internal::ThreadPool tp(threads);
for (int i = 0; i != threads; i++) {
tp.Schedule(std::bind(
&EndTest, &c0, &c1, &mu2, &cv2,
std::function<void(int)>(std::bind(test, cxt, std::placeholders::_1))));
}
mu2.Lock();
while (c1 != threads) {
cv2.Wait(&mu2);
}
mu2.Unlock();
return cxt->g0;
}
static int RunTest(void (*test)(TestContext *cxt, int), int threads,
int iterations, int operations) {
TestContext cxt;
return RunTestCommon(&cxt, test, threads, iterations, operations);
}
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
int threads, int iterations,
int operations,
void (*invariant)(void *)) {
ScopedInvariantDebugging scoped_debugging;
SetInvariantChecked(false);
TestContext cxt;
cxt.mu.EnableInvariantDebugging(invariant, &cxt);
int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
CHECK(GetInvariantChecked()) << "Invariant not checked";
return ret;
}
#endif
struct TimeoutBugStruct {
absl::Mutex mu;
bool a;
int a_waiter_count;
};
static void WaitForA(TimeoutBugStruct *x) {
x->mu.LockWhen(absl::Condition(&x->a));
x->a_waiter_count--;
x->mu.Unlock();
}
static bool NoAWaiters(TimeoutBugStruct *x) { return x->a_waiter_count == 0; }
TEST(Mutex, CondVarWaitSignalsAwait) {
struct {
absl::Mutex barrier_mu;
bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
absl::Mutex release_mu;
bool release ABSL_GUARDED_BY(release_mu) = false;
absl::CondVar released_cv;
} state;
auto pool = CreateDefaultPool();
pool->Schedule([&state] {
state.release_mu.Lock();
state.barrier_mu.Lock();
state.barrier = true;
state.barrier_mu.Unlock();
state.release_mu.Await(absl::Condition(&state.release));
state.released_cv.Signal();
state.release_mu.Unlock();
});
state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
state.barrier_mu.Unlock();
state.release_mu.Lock();
state.release = true;
state.released_cv.Wait(&state.release_mu);
state.release_mu.Unlock();
}
TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
struct {
absl::Mutex barrier_mu;
bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
absl::Mutex release_mu;
bool release ABSL_GUARDED_BY(release_mu) = false;
absl::CondVar released_cv;
} state;
auto pool = CreateDefaultPool();
pool->Schedule([&state] {
state.release_mu.Lock();
state.barrier_mu.Lock();
state.barrier = true;
state.barrier_mu.Unlock();
state.release_mu.Await(absl::Condition(&state.release));
state.released_cv.Signal();
state.release_mu.Unlock();
});
state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
state.barrier_mu.Unlock();
state.release_mu.Lock();
state.release = true;
EXPECT_TRUE(
!state.released_cv.WaitWithTimeout(&state.release_mu, absl::Seconds(10)))
<< "; Unrecoverable test failure: CondVar::WaitWithTimeout did not "
"unblock the absl::Mutex::Await call in another thread.";
state.release_mu.Unlock();
}
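// Regression test: a LockWhenWithTimeout() that times out while other
// threads are blocked in LockWhen() must leave those waiters eligible for
// wakeup.  After the timed-out acquisition, the condition they wait on is
// set and both WaitForA() threads are expected to drain (NoAWaiters).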
TEST(Mutex, MutexTimeoutBug) {
auto tp = CreateDefaultPool();
TimeoutBugStruct x;
x.a = false;
x.a_waiter_count = 2;
tp->Schedule(std::bind(&WaitForA, &x));
tp->Schedule(std::bind(&WaitForA, &x));
absl::SleepFor(absl::Seconds(1));
bool always_false = false;
x.mu.LockWhenWithTimeout(absl::Condition(&always_false),
absl::Milliseconds(500));
x.a = true;
x.mu.Await(absl::Condition(&NoAWaiters, &x));
x.mu.Unlock();
}
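// Parameterized over the three low bits of GetParam() -- bit 0: Waiter1 uses
// a reader lock; bit 1: Waiter2 uses ReaderLockWhen; bit 2: Signal() is
// issued after unlocking rather than while holding the mutex -- giving the
// eight combinations instantiated with Range(0, 8) below.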
struct CondVarWaitDeadlock : testing::TestWithParam<int> {
absl::Mutex mu;
absl::CondVar cv;
bool cond1 = false;
bool cond2 = false;
bool read_lock1;
bool read_lock2;
bool signal_unlocked;
CondVarWaitDeadlock() {
read_lock1 = GetParam() & (1 << 0);
read_lock2 = GetParam() & (1 << 1);
signal_unlocked = GetParam() & (1 << 2);
}
void Waiter1() {
if (read_lock1) {
mu.ReaderLock();
while (!cond1) {
cv.Wait(&mu);
}
mu.ReaderUnlock();
} else {
mu.Lock();
while (!cond1) {
cv.Wait(&mu);
}
mu.Unlock();
}
}
void Waiter2() {
if (read_lock2) {
mu.ReaderLockWhen(absl::Condition(&cond2));
mu.ReaderUnlock();
} else {
mu.LockWhen(absl::Condition(&cond2));
mu.Unlock();
}
}
};
TEST_P(CondVarWaitDeadlock, Test) {
auto waiter1 = CreatePool(1);
auto waiter2 = CreatePool(1);
waiter1->Schedule([this] { this->Waiter1(); });
waiter2->Schedule([this] { this->Waiter2(); });
absl::SleepFor(absl::Milliseconds(100));
mu.Lock();
cond1 = true;
if (signal_unlocked) {
mu.Unlock();
cv.Signal();
} else {
cv.Signal();
mu.Unlock();
}
waiter1.reset();
mu.Lock();
cond2 = true;
mu.Unlock();
waiter2.reset();
}
INSTANTIATE_TEST_SUITE_P(CondVarWaitDeadlockTest, CondVarWaitDeadlock,
::testing::Range(0, 8),
::testing::PrintToStringParamName());
struct DequeueAllWakeableBugStruct {
absl::Mutex mu;
absl::Mutex mu2;
int unfinished_count;
bool done1;
int finished_count;
bool done2;
};
static void AcquireAsReader(DequeueAllWakeableBugStruct *x) {
x->mu.ReaderLock();
x->mu2.Lock();
x->unfinished_count--;
x->done1 = (x->unfinished_count == 0);
x->mu2.Unlock();
absl::SleepFor(absl::Seconds(2));
x->mu.ReaderUnlock();
x->mu2.Lock();
x->finished_count--;
x->done2 = (x->finished_count == 0);
x->mu2.Unlock();
}
TEST(Mutex, MutexReaderWakeupBug) {
auto tp = CreateDefaultPool();
DequeueAllWakeableBugStruct x;
x.unfinished_count = 2;
x.done1 = false;
x.finished_count = 2;
x.done2 = false;
x.mu.Lock();
tp->Schedule(std::bind(&AcquireAsReader, &x));
tp->Schedule(std::bind(&AcquireAsReader, &x));
absl::SleepFor(absl::Seconds(1));
x.mu.Unlock();
EXPECT_TRUE(
x.mu2.LockWhenWithTimeout(absl::Condition(&x.done1), absl::Seconds(10)));
x.mu2.Unlock();
EXPECT_TRUE(
x.mu2.LockWhenWithTimeout(absl::Condition(&x.done2), absl::Seconds(10)));
x.mu2.Unlock();
}
struct LockWhenTestStruct {
absl::Mutex mu1;
bool cond = false;
absl::Mutex mu2;
bool waiting = false;
};
static bool LockWhenTestIsCond(LockWhenTestStruct *s) {
s->mu2.Lock();
s->waiting = true;
s->mu2.Unlock();
return s->cond;
}
static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) {
s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
s->mu1.Unlock();
}
TEST(Mutex, LockWhen) {
LockWhenTestStruct s;
std::thread t(LockWhenTestWaitForIsCond, &s);
s.mu2.LockWhen(absl::Condition(&s.waiting));
s.mu2.Unlock();
s.mu1.Lock();
s.cond = true;
s.mu1.Unlock();
t.join();
}
TEST(Mutex, LockWhenGuard) {
absl::Mutex mu;
int n = 30;
bool done = false;
bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; };
bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; };
std::thread t1([&mu, &n, &done, cond_eq_10]() {
absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n));
done = true;
});
std::thread t2[10];
for (std::thread &t : t2) {
t = std::thread([&mu, &n, cond_lt_10]() {
absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n));
++n;
});
}
{
absl::MutexLock lock(&mu);
n = 0;
}
for (std::thread &t : t2) t.join();
t1.join();
EXPECT_TRUE(done);
EXPECT_EQ(n, 10);
}
#if !defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
struct ReaderDecrementBugStruct {
bool cond;
int done;
absl::Mutex mu;
bool waiting_on_cond;
bool have_reader_lock;
bool complete;
absl::Mutex mu2;
};
static bool IsCond(void *v) {
ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
x->mu2.Lock();
x->waiting_on_cond = true;
x->mu2.Unlock();
return x->cond;
}
static bool AllDone(void *v) {
ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
return x->done == 0;
}
static void WaitForCond(ReaderDecrementBugStruct *x) {
absl::Mutex dummy;
absl::MutexLock l(&dummy);
x->mu.LockWhen(absl::Condition(&IsCond, x));
x->done--;
x->mu.Unlock();
}
static void GetReadLock(ReaderDecrementBugStruct *x) {
x->mu.ReaderLock();
x->mu2.Lock();
x->have_reader_lock = true;
x->mu2.Await(absl::Condition(&x->complete));
x->mu2.Unlock();
x->mu.ReaderUnlock();
x->mu.Lock();
x->done--;
x->mu.Unlock();
}
TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
ReaderDecrementBugStruct x;
x.cond = false;
x.waiting_on_cond = false;
x.have_reader_lock = false;
x.complete = false;
x.done = 2;
std::thread thread1(WaitForCond, &x);
x.mu2.LockWhen(absl::Condition(&x.waiting_on_cond));
x.mu2.Unlock();
std::thread thread2(GetReadLock, &x);
x.mu2.LockWhen(absl::Condition(&x.have_reader_lock));
x.mu2.Unlock();
x.mu.ReaderLock();
x.mu.ReaderUnlock();
x.mu.AssertReaderHeld();
x.mu2.Lock();
x.complete = true;
x.mu2.Unlock();
x.mu.Lock();
x.cond = true;
x.mu.Await(absl::Condition(&AllDone, &x));
x.mu.Unlock();
thread1.join();
thread2.join();
}
#endif
#ifdef ABSL_HAVE_THREAD_SANITIZER
TEST(Mutex, DISABLED_LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#else
TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#endif
for (int i = 0; i != 10; i++) {
const int kNumLocks = 10;
auto mu = absl::make_unique<absl::Mutex[]>(kNumLocks);
for (int j = 0; j != kNumLocks; j++) {
if ((j % 2) == 0) {
mu[j].WriterLock();
} else {
mu[j].ReaderLock();
}
}
}
}
bool Equals42(int *p) { return *p == 42; }
bool Equals43(int *p) { return *p == 43; }
bool ConstEquals42(const int *p) { return *p == 42; }
bool ConstEquals43(const int *p) { return *p == 43; }
template <typename T>
bool TemplateEquals42(T *p) {
return *p == 42;
}
template <typename T>
bool TemplateEquals43(T *p) {
return *p == 43;
}
TEST(Mutex, FunctionPointerCondition) {
int x = 42;
const int const_x = 42;
EXPECT_TRUE(absl::Condition(Equals42, &x).Eval());
EXPECT_FALSE(absl::Condition(Equals43, &x).Eval());
EXPECT_TRUE(absl::Condition(ConstEquals42, &x).Eval());
EXPECT_FALSE(absl::Condition(ConstEquals43, &x).Eval());
EXPECT_TRUE(absl::Condition(ConstEquals42, &const_x).Eval());
EXPECT_FALSE(absl::Condition(ConstEquals43, &const_x).Eval());
EXPECT_TRUE(absl::Condition(TemplateEquals42, &x).Eval());
EXPECT_FALSE(absl::Condition(TemplateEquals43, &x).Eval());
EXPECT_TRUE(absl::Condition(TemplateEquals42, &const_x).Eval());
EXPECT_FALSE(absl::Condition(TemplateEquals43, &const_x).Eval());
EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(Equals42),
decltype(&const_x)>::value));
EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(ConstEquals42),
decltype(&const_x)>::value));
}
struct Base {
explicit Base(int v) : value(v) {}
int value;
};
struct Derived : Base {
explicit Derived(int v) : Base(v) {}
};
bool BaseEquals42(Base *p) { return p->value == 42; }
bool BaseEquals43(Base *p) { return p->value == 43; }
bool ConstBaseEquals42(const Base *p) { return p->value == 42; }
bool ConstBaseEquals43(const Base *p) { return p->value == 43; }
TEST(Mutex, FunctionPointerConditionWithDerivedToBaseConversion) {
Derived derived(42);
const Derived const_derived(42);
EXPECT_TRUE(absl::Condition(BaseEquals42, &derived).Eval());
EXPECT_FALSE(absl::Condition(BaseEquals43, &derived).Eval());
EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &derived).Eval());
EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &derived).Eval());
EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
bool (*derived_pred)(const Derived *) = [](const Derived *) { return true; };
EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
Base *>::value));
EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
const Base *>::value));
EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
Derived *>::value));
EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
const Derived *>::value));
}
struct Constable {
bool WotsAllThisThen() const { return true; }
};
TEST(Mutex, FunctionPointerConditionWithConstMethod) {
const Constable chapman;
EXPECT_TRUE(absl::Condition(&chapman, &Constable::WotsAllThisThen).Eval());
}
struct True {
template <class... Args>
bool operator()(Args...) const {
return true;
}
};
struct DerivedTrue : True {};
TEST(Mutex, FunctorCondition) {
{
True f;
EXPECT_TRUE(absl::Condition(&f).Eval());
}
{
DerivedTrue g;
EXPECT_TRUE(absl::Condition(&g).Eval());
}
{
int value = 3;
auto is_zero = [&value] { return value == 0; };
absl::Condition c(&is_zero);
EXPECT_FALSE(c.Eval());
value = 0;
EXPECT_TRUE(c.Eval());
}
{
int value = 0;
auto is_positive = std::bind(std::less<int>(), 0, std::cref(value));
absl::Condition c(&is_positive);
EXPECT_FALSE(c.Eval());
value = 1;
EXPECT_TRUE(c.Eval());
}
{
int value = 3;
std::function<bool()> is_zero = [&value] { return value == 0; };
absl::Condition c(&is_zero);
EXPECT_FALSE(c.Eval());
value = 0;
EXPECT_TRUE(c.Eval());
}
}
TEST(Mutex, ConditionSwap) {
bool b1 = true;
absl::Condition c1(&b1);
bool b2 = false;
absl::Condition c2(&b2);
EXPECT_TRUE(c1.Eval());
EXPECT_FALSE(c2.Eval());
std::swap(c1, c2);
EXPECT_FALSE(c1.Eval());
EXPECT_TRUE(c2.Eval());
}
static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
int *running) {
std::random_device dev;
std::mt19937 gen(dev());
std::uniform_int_distribution<int> random_millis(0, 15);
mu->ReaderLock();
while (*running == 3) {
absl::SleepFor(absl::Milliseconds(random_millis(gen)));
cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
}
mu->ReaderUnlock();
mu->Lock();
(*running)--;
mu->Unlock();
}
static bool IntIsZero(int *x) { return *x == 0; }
TEST(Mutex, TestReaderOnCondVar) {
auto tp = CreateDefaultPool();
absl::Mutex mu;
absl::CondVar cv;
int running = 3;
tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
absl::SleepFor(absl::Seconds(2));
mu.Lock();
running--;
mu.Await(absl::Condition(&IntIsZero, &running));
mu.Unlock();
}
struct AcquireFromConditionStruct {
absl::Mutex mu0;
int value;
bool done;
absl::Mutex mu1;
absl::CondVar cv;
};
static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
x->value++;
if (x->value == 2 || x->value == 3) {
bool always_false = false;
x->mu1.LockWhenWithTimeout(absl::Condition(&always_false),
absl::Milliseconds(100));
x->mu1.Unlock();
}
CHECK_LT(x->value, 4) << "should not be invoked a fourth time";
return x->value == 2 || x->value == 3;
}
static void WaitForCond2(AcquireFromConditionStruct *x) {
x->mu0.LockWhen(absl::Condition(&ConditionWithAcquire, x));
x->done = true;
x->mu0.Unlock();
}
TEST(Mutex, AcquireFromCondition) {
auto tp = CreateDefaultPool();
AcquireFromConditionStruct x;
x.value = 0;
x.done = false;
  tp->Schedule(std::bind(&WaitForCond2, &x));
absl::SleepFor(absl::Milliseconds(500));
x.mu0.Lock();
x.cv.WaitWithTimeout(&x.mu0, absl::Milliseconds(500));
x.mu0.Unlock();
x.mu0.LockWhen(absl::Condition(&x.done));
x.mu0.Unlock();
}
TEST(Mutex, DeadlockDetector) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
absl::Mutex m1;
absl::Mutex m2;
absl::Mutex m3;
absl::Mutex m4;
m1.Lock();
m2.Lock();
m3.Lock();
m3.Unlock();
m2.Unlock();
m1.ForgetDeadlockInfo();
m2.Lock();
m3.Lock();
m4.Lock();
m3.Unlock();
m2.Unlock();
m4.Unlock();
m1.Unlock();
}
class ScopedDisableBazelTestWarnings {
public:
ScopedDisableBazelTestWarnings() {
#ifdef _WIN32
char file[MAX_PATH];
if (GetEnvironmentVariableA(kVarName, file, sizeof(file)) < sizeof(file)) {
warnings_output_file_ = file;
SetEnvironmentVariableA(kVarName, nullptr);
}
#else
const char *file = getenv(kVarName);
if (file != nullptr) {
warnings_output_file_ = file;
unsetenv(kVarName);
}
#endif
}
~ScopedDisableBazelTestWarnings() {
if (!warnings_output_file_.empty()) {
#ifdef _WIN32
SetEnvironmentVariableA(kVarName, warnings_output_file_.c_str());
#else
setenv(kVarName, warnings_output_file_.c_str(), 0);
#endif
}
}
private:
static const char kVarName[];
std::string warnings_output_file_;
};
const char ScopedDisableBazelTestWarnings::kVarName[] =
"TEST_WARNINGS_OUTPUT_FILE";
#ifdef ABSL_HAVE_THREAD_SANITIZER
TEST(Mutex, DISABLED_DeadlockDetectorBazelWarning) {
#else
TEST(Mutex, DeadlockDetectorBazelWarning) {
#endif
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
absl::Mutex mu0;
absl::Mutex mu1;
bool got_mu0 = mu0.TryLock();
mu1.Lock();
if (got_mu0) {
mu0.Unlock();
}
if (mu0.TryLock()) {
mu0.Unlock();
}
mu0.Lock();
mu0.Unlock();
mu1.Unlock();
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
TEST(Mutex, DeadlockDetectorLongCycle) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
std::vector<absl::Mutex> mutex(100);
for (size_t i = 0; i != mutex.size(); i++) {
mutex[i].Lock();
mutex[(i + 1) % mutex.size()].Lock();
mutex[i].Unlock();
mutex[(i + 1) % mutex.size()].Unlock();
}
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
const int n_locks = 1 << 17;
auto array_of_locks = absl::make_unique<absl::Mutex[]>(n_locks);
for (int i = 0; i < n_locks; i++) {
int end = std::min(n_locks, i + 5);
for (int j = i; j < end; j++) {
array_of_locks[j].Lock();
}
for (int j = i; j < end; j++) {
array_of_locks[j].Unlock();
}
}
}
#ifdef ABSL_HAVE_THREAD_SANITIZER
TEST(Mutex, DISABLED_DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#else
TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#endif
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
absl::Mutex *a = new absl::Mutex;
absl::Mutex b, c;
a->Lock();
b.Lock();
b.Unlock();
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
delete a;
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
c.Lock();
c.Unlock();
}
static absl::Duration TimeoutTestAllowedSchedulingDelay() {
return absl::Milliseconds(150);
}
ABSL_MUST_USE_RESULT
static bool DelayIsWithinBounds(absl::Duration expected_delay,
absl::Duration actual_delay) {
bool pass = true;
if (actual_delay < expected_delay) {
LOG(WARNING) << "Actual delay " << actual_delay
<< " was too short, expected " << expected_delay
<< " (difference " << actual_delay - expected_delay << ")";
pass = false;
}
absl::Duration tolerance = expected_delay <= absl::ZeroDuration()
? absl::Milliseconds(10)
: TimeoutTestAllowedSchedulingDelay();
if (actual_delay > expected_delay + tolerance) {
LOG(WARNING) << "Actual delay " << actual_delay
<< " was too long, expected " << expected_delay
<< " (difference " << actual_delay - expected_delay << ")";
pass = false;
}
return pass;
}
struct TimeoutTestParam {
const char *from_file;
int from_line;
bool use_absolute_deadline;
absl::Duration wait_timeout;
absl::Duration satisfy_condition_delay;
bool expected_result;
absl::Duration expected_delay;
};
std::ostream &operator<<(std::ostream &os, const TimeoutTestParam &param) {
return os << "from: " << param.from_file << ":" << param.from_line
<< " use_absolute_deadline: "
<< (param.use_absolute_deadline ? "true" : "false")
<< " wait_timeout: " << param.wait_timeout
<< " satisfy_condition_delay: " << param.satisfy_condition_delay
<< " expected_result: "
<< (param.expected_result ? "true" : "false")
<< " expected_delay: " << param.expected_delay;
}
static void RunAfterDelay(absl::Duration delay,
absl::synchronization_internal::ThreadPool *pool,
const std::function<void()> &callback) {
if (delay <= absl::ZeroDuration()) {
callback();
} else if (delay != absl::InfiniteDuration()) {
ScheduleAfter(pool, delay, callback);
}
}
class TimeoutTest : public ::testing::Test,
public ::testing::WithParamInterface<TimeoutTestParam> {};
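// Builds the timeout test matrix: each entry pairs a wait timeout (negative,
// zero, finite, or infinite) with a delay before the condition is satisfied,
// and records the expected return value plus the expected elapsed time.
// Every case is emitted twice, once for relative timeouts and once for
// absolute deadlines.  DelayIsWithinBounds() validates the elapsed time, and
// the TEST_P bodies retry a whole attempt when scheduling noise pushes a
// delay out of bounds.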
std::vector<TimeoutTestParam> MakeTimeoutTestParamValues() {
const absl::Duration finite = 3 * TimeoutTestAllowedSchedulingDelay();
const absl::Duration never = absl::InfiniteDuration();
const absl::Duration negative = -absl::InfiniteDuration();
const absl::Duration immediate = absl::ZeroDuration();
std::vector<TimeoutTestParam> values;
for (bool use_absolute_deadline : {false, true}) {
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
negative,
immediate,
true,
immediate,
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
negative,
finite,
false,
immediate
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
negative,
never,
false,
immediate
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
never,
immediate,
true,
immediate
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
never,
finite,
true,
finite,
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
never,
immediate,
true,
immediate
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
finite * 2,
finite,
true,
finite
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
finite,
finite * 2,
false,
finite
});
values.push_back(TimeoutTestParam{
__FILE__, __LINE__, use_absolute_deadline,
finite,
never,
false,
finite
});
}
return values;
}
INSTANTIATE_TEST_SUITE_P(All, TimeoutTest,
testing::ValuesIn(MakeTimeoutTestParamValues()));
TEST_P(TimeoutTest, Await) {
const TimeoutTestParam params = GetParam();
LOG(INFO) << "Params: " << params;
for (int attempt = 1;; ++attempt) {
LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false;
std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
CreateDefaultPool();
RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
absl::MutexLock l(&mu);
value = true;
});
absl::MutexLock lock(&mu);
absl::Time start_time = absl::Now();
absl::Condition cond(&value);
bool result =
params.use_absolute_deadline
? mu.AwaitWithDeadline(cond, start_time + params.wait_timeout)
: mu.AwaitWithTimeout(cond, params.wait_timeout);
if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
EXPECT_EQ(params.expected_result, result);
break;
}
}
}
TEST_P(TimeoutTest, LockWhen) {
const TimeoutTestParam params = GetParam();
LOG(INFO) << "Params: " << params;
for (int attempt = 1;; ++attempt) {
LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false;
std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
CreateDefaultPool();
RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
absl::MutexLock l(&mu);
value = true;
});
absl::Time start_time = absl::Now();
absl::Condition cond(&value);
bool result =
params.use_absolute_deadline
? mu.LockWhenWithDeadline(cond, start_time + params.wait_timeout)
: mu.LockWhenWithTimeout(cond, params.wait_timeout);
mu.Unlock();
if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
EXPECT_EQ(params.expected_result, result);
break;
}
}
}
TEST_P(TimeoutTest, ReaderLockWhen) {
const TimeoutTestParam params = GetParam();
LOG(INFO) << "Params: " << params;
for (int attempt = 0;; ++attempt) {
LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false;
std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
CreateDefaultPool();
RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
absl::MutexLock l(&mu);
value = true;
});
absl::Time start_time = absl::Now();
bool result =
params.use_absolute_deadline
? mu.ReaderLockWhenWithDeadline(absl::Condition(&value),
start_time + params.wait_timeout)
: mu.ReaderLockWhenWithTimeout(absl::Condition(&value),
params.wait_timeout);
mu.ReaderUnlock();
if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
EXPECT_EQ(params.expected_result, result);
break;
}
}
}
TEST_P(TimeoutTest, Wait) {
const TimeoutTestParam params = GetParam();
LOG(INFO) << "Params: " << params;
for (int attempt = 0;; ++attempt) {
LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false;
absl::CondVar cv;
std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
CreateDefaultPool();
RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
absl::MutexLock l(&mu);
value = true;
cv.Signal();
});
absl::MutexLock lock(&mu);
absl::Time start_time = absl::Now();
absl::Duration timeout = params.wait_timeout;
absl::Time deadline = start_time + timeout;
while (!value) {
if (params.use_absolute_deadline ? cv.WaitWithDeadline(&mu, deadline)
: cv.WaitWithTimeout(&mu, timeout)) {
break;
}
timeout = deadline - absl::Now();
}
bool result = value;
if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
EXPECT_EQ(params.expected_result, result);
break;
}
}
}
TEST(Mutex, Logging) {
absl::Mutex logged_mutex;
logged_mutex.EnableDebugLog("fido_mutex");
absl::CondVar logged_cv;
logged_cv.EnableDebugLog("rover_cv");
logged_mutex.Lock();
logged_cv.WaitWithTimeout(&logged_mutex, absl::Milliseconds(20));
logged_mutex.Unlock();
logged_mutex.ReaderLock();
logged_mutex.ReaderUnlock();
logged_mutex.Lock();
logged_mutex.Unlock();
logged_cv.Signal();
logged_cv.SignalAll();
}
TEST(Mutex, LoggingAddressReuse) {
ScopedInvariantDebugging scoped_debugging;
alignas(absl::Mutex) char storage[sizeof(absl::Mutex)];
auto invariant =
+[](void *alive) { EXPECT_TRUE(*static_cast<bool *>(alive)); };
constexpr size_t kIters = 10;
bool alive[kIters] = {};
for (size_t i = 0; i < kIters; ++i) {
absl::Mutex *mu = new (storage) absl::Mutex;
alive[i] = true;
mu->EnableDebugLog("Mutex");
mu->EnableInvariantDebugging(invariant, &alive[i]);
mu->Lock();
mu->Unlock();
mu->~Mutex();
alive[i] = false;
}
}
TEST(Mutex, LoggingBankruptcy) {
ScopedInvariantDebugging scoped_debugging;
std::vector<absl::Mutex> mus(1 << 20);
for (auto &mu : mus) {
mu.EnableDebugLog("Mutex");
}
}
TEST(Mutex, SynchEventRace) {
ScopedInvariantDebugging scoped_debugging;
std::vector<std::thread> threads;
for (size_t i = 0; i < 5; i++) {
threads.emplace_back([&] {
for (size_t j = 0; j < (1 << 17); j++) {
{
absl::Mutex mu;
mu.EnableInvariantDebugging([](void *) {}, nullptr);
mu.Lock();
mu.Unlock();
}
{
absl::Mutex mu;
mu.EnableDebugLog("Mutex");
}
}
});
}
for (auto &thread : threads) {
thread.join();
}
}
static std::vector<int> AllThreadCountValues() {
if (kExtendedTest) {
return {2, 4, 8, 10, 16, 20, 24, 30, 32};
}
return {2, 4, 10};
}
class MutexVariableThreadCountTest : public ::testing::TestWithParam<int> {};
INSTANTIATE_TEST_SUITE_P(ThreadCounts, MutexVariableThreadCountTest,
::testing::ValuesIn(AllThreadCountValues()),
::testing::PrintToStringParamName());
static int ScaleIterations(int x) {
#if defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
return x / 10;
#else
return x;
#endif
}
TEST_P(MutexVariableThreadCountTest, Mutex) {
int threads = GetParam();
int iterations = ScaleIterations(10000000) / threads;
int operations = threads * iterations;
EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
iterations = std::min(iterations, 10);
operations = threads * iterations;
EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
operations, CheckSumG0G1),
operations);
#endif
}
TEST_P(MutexVariableThreadCountTest, Try) {
int threads = GetParam();
int iterations = 1000000 / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
iterations = std::min(iterations, 10);
operations = threads * iterations;
EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
operations, CheckSumG0G1),
operations);
#endif
}
TEST_P(MutexVariableThreadCountTest, R20ms) {
int threads = GetParam();
int iterations = 100;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestR20ms, threads, iterations, operations), 0);
}
TEST_P(MutexVariableThreadCountTest, RW) {
int threads = GetParam();
int iterations = ScaleIterations(20000000) / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
iterations = std::min(iterations, 10);
operations = threads * iterations;
EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
operations, CheckSumG0G1),
operations / 2);
#endif
}
TEST_P(MutexVariableThreadCountTest, Await) {
int threads = GetParam();
int iterations = ScaleIterations(500000);
int operations = iterations;
EXPECT_EQ(RunTest(&TestAwait, threads, iterations, operations), operations);
}
TEST_P(MutexVariableThreadCountTest, SignalAll) {
int threads = GetParam();
int iterations = 200000 / threads;
int operations = iterations;
EXPECT_EQ(RunTest(&TestSignalAll, threads, iterations, operations),
operations);
}
TEST(Mutex, Signal) {
int threads = 2;
int iterations = 200000;
int operations = iterations;
EXPECT_EQ(RunTest(&TestSignal, threads, iterations, operations), operations);
}
TEST(Mutex, Timed) {
int threads = 10;
int iterations = 1000;
int operations = iterations;
EXPECT_EQ(RunTest(&TestCVTimeout, threads, iterations, operations),
operations);
}
TEST(Mutex, CVTime) {
int threads = 10;
int iterations = 1;
EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), threads * iterations);
}
TEST(Mutex, MuTime) {
int threads = 10;
int iterations = 1;
EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations);
}
TEST(Mutex, SignalExitedThread) {
#if defined(__wasm__) || defined(__asmjs__)
constexpr int kThreads = 1;
#else
constexpr int kThreads = 100;
#endif
std::vector<std::thread> top;
for (unsigned i = 0; i < 2 * std::thread::hardware_concurrency(); i++) {
top.emplace_back([&]() {
for (int i = 0; i < kThreads; i++) {
absl::Mutex mu;
std::thread t([&]() {
mu.Lock();
mu.Unlock();
});
mu.Lock();
mu.Unlock();
t.join();
}
});
}
for (auto &th : top) th.join();
}
TEST(Mutex, WriterPriority) {
absl::Mutex mu;
bool wrote = false;
std::atomic<bool> saw_wrote{false};
auto readfunc = [&]() {
for (size_t i = 0; i < 10; ++i) {
absl::ReaderMutexLock lock(&mu);
if (wrote) {
saw_wrote = true;
break;
}
absl::SleepFor(absl::Seconds(1));
}
};
std::thread t1(readfunc);
absl::SleepFor(absl::Milliseconds(500));
std::thread t2(readfunc);
std::thread t3([&]() {
absl::MutexLock lock(&mu);
wrote = true;
});
t1.join();
t2.join();
t3.join();
EXPECT_TRUE(saw_wrote.load());
}
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
TEST(Mutex, CondVarPriority) {
int err = 0;
sched_param param;
param.sched_priority = 7;
std::thread test([&]() {
    err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
});
test.join();
if (err) {
GTEST_SKIP() << "failed to set priority: " << strerror(err);
}
absl::Mutex mu;
absl::CondVar cv;
bool locked = false;
bool notified = false;
bool waiting = false;
bool morph = false;
std::thread th([&]() {
    EXPECT_EQ(0, pthread_setschedparam(pthread_self(), SCHED_FIFO, &param));
mu.Lock();
locked = true;
mu.Await(absl::Condition(¬ified));
mu.Unlock();
EXPECT_EQ(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
->per_thread_synch.priority,
param.sched_priority);
mu.Lock();
mu.Await(absl::Condition(&waiting));
morph = true;
absl::SleepFor(absl::Seconds(1));
cv.Signal();
mu.Unlock();
});
mu.Lock();
mu.Await(absl::Condition(&locked));
notified = true;
mu.Unlock();
mu.Lock();
waiting = true;
while (!morph) {
cv.Wait(&mu);
}
mu.Unlock();
th.join();
EXPECT_NE(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
->per_thread_synch.priority,
param.sched_priority);
}
#endif
TEST(Mutex, LockWhenWithTimeoutResult) {
absl::Mutex mu;
const bool kAlwaysTrue = true, kAlwaysFalse = false;
const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
mu.Unlock();
EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
std::thread th1([&]() {
EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
mu.Unlock();
});
std::thread th2([&]() {
EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
mu.Unlock();
});
absl::SleepFor(absl::Milliseconds(100));
mu.Unlock();
th1.join();
th2.join();
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/mutex.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/mutex_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
f23edeb0-accf-4935-9f5b-21406e911acd | cpp | google/quiche | balsa_headers_sequence | quiche/balsa/balsa_headers_sequence.cc | quiche/balsa/balsa_headers_sequence_test.cc | #include "quiche/balsa/balsa_headers_sequence.h"
#include <memory>
#include <utility>
#include "quiche/balsa/balsa_headers.h"
namespace quiche {
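// BalsaHeadersSequence is a FIFO of owned BalsaHeaders with a read cursor:
// Append() pushes to the back, Next() returns the element at the cursor and
// advances, PeekNext() returns it without advancing, and Clear() resets both
// the storage and the cursor.  Pointers returned by Next()/PeekNext() remain
// valid until Clear() or destruction, since the elements are individually
// heap-allocated.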
void BalsaHeadersSequence::Append(std::unique_ptr<BalsaHeaders> headers) {
sequence_.push_back(std::move(headers));
}
bool BalsaHeadersSequence::HasNext() const { return next_ < sequence_.size(); }
BalsaHeaders* BalsaHeadersSequence::PeekNext() {
if (!HasNext()) {
return nullptr;
}
return sequence_[next_].get();
}
BalsaHeaders* BalsaHeadersSequence::Next() {
if (!HasNext()) {
return nullptr;
}
return sequence_[next_++].get();
}
void BalsaHeadersSequence::Clear() {
sequence_.clear();
next_ = 0;
}
} | #include "quiche/balsa/balsa_headers_sequence.h"
#include <memory>
#include <utility>
#include "quiche/balsa/balsa_headers.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
namespace {
TEST(BalsaHeadersSequenceTest, Initial) {
BalsaHeadersSequence sequence;
EXPECT_FALSE(sequence.HasNext());
EXPECT_EQ(sequence.Next(), nullptr);
EXPECT_TRUE(sequence.IsEmpty());
}
TEST(BalsaHeadersSequenceTest, Basic) {
BalsaHeadersSequence sequence;
auto headers_one = std::make_unique<BalsaHeaders>();
headers_one->AppendHeader("one", "fish");
sequence.Append(std::move(headers_one));
EXPECT_TRUE(sequence.HasNext());
EXPECT_FALSE(sequence.IsEmpty());
auto headers_two = std::make_unique<BalsaHeaders>();
headers_two->AppendHeader("two", "fish");
sequence.Append(std::move(headers_two));
EXPECT_TRUE(sequence.HasNext());
EXPECT_FALSE(sequence.IsEmpty());
const BalsaHeaders* headers = sequence.Next();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("one"));
EXPECT_TRUE(sequence.HasNext());
EXPECT_FALSE(sequence.IsEmpty());
headers = sequence.Next();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("two"));
EXPECT_FALSE(sequence.HasNext());
EXPECT_FALSE(sequence.IsEmpty());
EXPECT_EQ(sequence.Next(), nullptr);
}
TEST(BalsaHeadersSequenceTest, Clear) {
BalsaHeadersSequence sequence;
auto headers_one = std::make_unique<BalsaHeaders>();
headers_one->AppendHeader("one", "fish");
sequence.Append(std::move(headers_one));
EXPECT_TRUE(sequence.HasNext());
EXPECT_FALSE(sequence.IsEmpty());
auto headers_two = std::make_unique<BalsaHeaders>();
headers_two->AppendHeader("two", "fish");
sequence.Append(std::move(headers_two));
EXPECT_TRUE(sequence.HasNext());
EXPECT_FALSE(sequence.IsEmpty());
sequence.Clear();
EXPECT_FALSE(sequence.HasNext());
EXPECT_EQ(sequence.Next(), nullptr);
EXPECT_TRUE(sequence.IsEmpty());
}
TEST(BalsaHeadersSequenceTest, PeekNext) {
BalsaHeadersSequence sequence;
EXPECT_EQ(sequence.PeekNext(), nullptr);
auto headers_one = std::make_unique<BalsaHeaders>();
headers_one->AppendHeader("one", "fish");
sequence.Append(std::move(headers_one));
EXPECT_TRUE(sequence.HasNext());
const BalsaHeaders* headers = sequence.PeekNext();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("one"));
EXPECT_TRUE(sequence.HasNext());
EXPECT_EQ(sequence.PeekNext(), headers);
auto headers_two = std::make_unique<BalsaHeaders>();
headers_two->AppendHeader("two", "fish");
sequence.Append(std::move(headers_two));
EXPECT_TRUE(sequence.HasNext());
EXPECT_EQ(sequence.PeekNext(), headers);
headers = sequence.Next();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("one"));
EXPECT_TRUE(sequence.HasNext());
headers = sequence.PeekNext();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("two"));
EXPECT_TRUE(sequence.HasNext());
headers = sequence.Next();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("two"));
EXPECT_FALSE(sequence.HasNext());
EXPECT_EQ(sequence.PeekNext(), nullptr);
}
TEST(BalsaHeadersSequenceTest, CanRetainValidReference) {
BalsaHeadersSequence sequence;
auto headers = std::make_unique<BalsaHeaders>();
headers->AppendHeader("one", "fish");
BalsaHeaders* headers_ptr = headers.get();
sequence.Append(std::move(headers));
ASSERT_TRUE(sequence.HasNext());
EXPECT_EQ(sequence.Next(), headers_ptr);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/balsa/balsa_headers_sequence.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/balsa/balsa_headers_sequence_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
5c3a7bc1-a537-48b5-8488-5855798de70c | cpp | tensorflow/tensorflow | serialize_utils | tensorflow/core/tfrt/saved_model/utils/serialize_utils.cc | tensorflow/core/tfrt/saved_model/utils/serialize_utils_test.cc | #include "tensorflow/core/tfrt/saved_model/utils/serialize_utils.h"
#include <cstring>
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "llvm/Support/ToolOutputFile.h"
#include "mlir/Support/FileUtilities.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tsl/platform/env.h"
#include "tfrt/bef/bef_buffer.h"
namespace tensorflow {
namespace tfrt_stub {
absl::Status SerializeBEF(const tfrt::BefBuffer &bef,
const std::string &filepath) {
std::string errorMessage;
auto output = mlir::openOutputFile(filepath, &errorMessage);
(output->os()).write(reinterpret_cast<const char *>(bef.data()), bef.size());
output->keep();
LOG(INFO) << "Completed serializing BEF to: " << filepath;
return absl::OkStatus();
}
absl::StatusOr<tfrt::BefBuffer> DeserializeBEFBuffer(
const std::string &filepath) {
std::string data;
TF_CHECK_OK(ReadFileToString(tsl::Env::Default(), filepath, &data));
tfrt::BefBuffer bef(data.begin(), data.end());
LOG(INFO) << "Successfully loaded serialized BEF from: " << filepath;
return bef;
}
absl::Status SerializeMLRTBytecode(const mlrt::bc::Buffer &bytecode,
const std::string &filepath) {
std::string errorMessage;
auto output = mlir::openOutputFile(filepath, &errorMessage);
(output->os())
.write(reinterpret_cast<const char *>(bytecode.data()), bytecode.size());
output->keep();
LOG(INFO) << "Completed serializing MLRTBytecode to: " << filepath;
return absl::OkStatus();
}
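// Deserialization copies the file contents into an mlrt allocator-backed
// buffer rather than aliasing the string, so the bytecode's lifetime and
// alignment are managed by the mlrt::bc::Buffer itself.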
absl::StatusOr<mlrt::bc::Buffer> DeserializeMlrtBytecodeBuffer(
const std::string &filepath) {
std::string bytecode_data;
TF_CHECK_OK(ReadFileToString(tsl::Env::Default(), filepath, &bytecode_data));
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
allocator.Allocate(bytecode_data.length(), alignof(char));
memcpy(buffer.data(), bytecode_data.data(), bytecode_data.length());
LOG(INFO) << "Successfully loaded serialized MLRTBytecode from: " << filepath;
return buffer;
}
}
} | #include "tensorflow/core/tfrt/saved_model/utils/serialize_utils.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/mlrt/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_util.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tsl/platform/env.h"
#include "tfrt/bef/bef_buffer.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(SerializeBEFTest, HandlesCompleteProcess) {
tfrt::BefBuffer old_bef;
const std::string saved_model_mlir_path =
"third_party/tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"test.mlir";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
std::unique_ptr<Runtime> runtime =
tensorflow::tfrt_stub::Runtime::Create(1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
tfrt::ResourceContext resource_context;
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK(ConvertTfMlirToBef(options.compile_options, module.get(),
&old_bef, model_context));
const std::string filepath =
io::JoinPath(getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
std::string("serialized_bef.mlir.bef"));
TF_ASSERT_OK(tensorflow::tfrt_stub::SerializeBEF(old_bef, filepath));
ASSERT_NE(old_bef.size(), 0);
TF_ASSERT_OK_AND_ASSIGN(const tfrt::BefBuffer bef,
DeserializeBEFBuffer(filepath));
ASSERT_TRUE(old_bef.size() == bef.size());
std::unique_ptr<Runtime> default_runtime =
DefaultTfrtRuntime(1);
SavedModel::Options default_options =
DefaultSavedModelOptions(default_runtime.get());
TF_EXPECT_OK(tfrt::CreateBefFileFromBefBuffer(
*default_options.graph_execution_options.runtime, bef)
.status());
}
TEST(SerializeMLRTTest, HandlesSerializeAndDeserializeProcess) {
mlrt::bc::Buffer old_bytecode;
const std::string saved_model_mlir_path =
"third_party/tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"test.mlir";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
mlir::OwningOpRef<mlir::ModuleOp> module_with_op_keys;
std::unique_ptr<Runtime> runtime =
tensorflow::tfrt_stub::Runtime::Create(1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
options.enable_mlrt = true;
tfrt::ResourceContext resource_context;
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<tfrt_stub::FallbackState> fallback_state,
tfrt_stub::FallbackState::Create(SessionOptions(), FunctionDefLibrary()));
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK_AND_ASSIGN(
old_bytecode, mlrt_compiler::ConvertTfMlirToBytecode(
options.compile_options, *fallback_state, module.get(),
model_context, &module_with_op_keys));
const std::string aot_package_path =
GetAotPackagePath(getenv("TEST_UNDECLARED_OUTPUTS_DIR"));
tsl::Env* env = tsl::Env::Default();
TF_ASSERT_OK(env->RecursivelyCreateDir(aot_package_path));
const std::string filepath =
io::JoinPath(aot_package_path, std::string("serialized_mlrt.mlir.mlrt"));
TF_ASSERT_OK(
tensorflow::tfrt_stub::SerializeMLRTBytecode(old_bytecode, filepath));
ASSERT_NE(old_bytecode.size(), 0);
mlrt::bc::Buffer bytecode;
TF_ASSERT_OK_AND_ASSIGN(bytecode, DeserializeMlrtBytecodeBuffer(filepath));
ASSERT_TRUE(old_bytecode.size() == bytecode.size());
EXPECT_STREQ(old_bytecode.data(), bytecode.data());
TF_ASSERT_OK_AND_ASSIGN(
bytecode,
LoadMlrtAndMlir(options.compile_options, module_with_op_keys.get(),
getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
fallback_state.get()));
ASSERT_TRUE(old_bytecode.size() == bytecode.size());
EXPECT_STREQ(old_bytecode.data(), bytecode.data());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/saved_model/utils/serialize_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/saved_model/utils/serialize_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d80d504-3b5e-493a-9924-c14be6ac9cb3 | cpp | tensorflow/tensorflow | gcs_file_system | third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system.cc | third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system_test.cc | #include "tsl/platform/cloud/gcs_file_system.h"
#include <stdio.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "tsl/platform/file_statistics.h"
#include "tsl/platform/strcat.h"
#ifdef _WIN32
#include <io.h>
#endif
#include "absl/base/macros.h"
#include "json/json.h"
#include "tsl/platform/cloud/curl_http_request.h"
#include "tsl/platform/cloud/file_block_cache.h"
#include "tsl/platform/cloud/google_auth_provider.h"
#include "tsl/platform/cloud/ram_file_block_cache.h"
#include "tsl/platform/cloud/time_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/profiler/lib/traceme.h"
#ifdef _WIN32
#ifdef DeleteFile
#undef DeleteFile
#endif
#endif
namespace tsl {
namespace {
constexpr char kGcsUriBase[] = "https:
constexpr char kGcsUploadUriBase[] =
"https:
constexpr char kStorageHost[] = "storage.googleapis.com";
constexpr char kBucketMetadataLocationKey[] = "location";
constexpr size_t kReadAppendableFileBufferSize = 1024 * 1024;
constexpr int kGetChildrenDefaultPageSize = 1000;
constexpr uint64 HTTP_CODE_RESUME_INCOMPLETE = 308;
constexpr uint64 HTTP_CODE_PRECONDITION_FAILED = 412;
ABSL_DEPRECATED("Use GCS_READ_CACHE_BLOCK_SIZE_MB instead.")
constexpr char kReadaheadBufferSize[] = "GCS_READAHEAD_BUFFER_SIZE_BYTES";
constexpr char kStatCacheMaxAge[] = "GCS_STAT_CACHE_MAX_AGE";
constexpr uint64 kStatCacheDefaultMaxAge = 5;
constexpr char kStatCacheMaxEntries[] = "GCS_STAT_CACHE_MAX_ENTRIES";
constexpr size_t kStatCacheDefaultMaxEntries = 1024;
constexpr char kMatchingPathsCacheMaxAge[] = "GCS_MATCHING_PATHS_CACHE_MAX_AGE";
constexpr uint64 kMatchingPathsCacheDefaultMaxAge = 0;
constexpr char kMatchingPathsCacheMaxEntries[] =
"GCS_MATCHING_PATHS_CACHE_MAX_ENTRIES";
constexpr size_t kMatchingPathsCacheDefaultMaxEntries = 1024;
constexpr size_t kBucketLocationCacheMaxEntries = 10;
constexpr size_t kCacheNeverExpire = std::numeric_limits<uint64>::max();
const FileStatistics DIRECTORY_STAT(0, 0, true);
constexpr char kResolveCacheSecs[] = "GCS_RESOLVE_REFRESH_SECS";
constexpr char kRequestConnectionTimeout[] =
"GCS_REQUEST_CONNECTION_TIMEOUT_SECS";
constexpr char kRequestIdleTimeout[] = "GCS_REQUEST_IDLE_TIMEOUT_SECS";
constexpr char kMetadataRequestTimeout[] = "GCS_METADATA_REQUEST_TIMEOUT_SECS";
constexpr char kReadRequestTimeout[] = "GCS_READ_REQUEST_TIMEOUT_SECS";
constexpr char kWriteRequestTimeout[] = "GCS_WRITE_REQUEST_TIMEOUT_SECS";
constexpr char kAdditionalRequestHeader[] = "GCS_ADDITIONAL_REQUEST_HEADER";
constexpr char kThrottleRate[] = "GCS_THROTTLE_TOKEN_RATE";
constexpr char kThrottleBucket[] = "GCS_THROTTLE_BUCKET_SIZE";
constexpr char kTokensPerRequest[] = "GCS_TOKENS_PER_REQUEST";
constexpr char kInitialTokens[] = "GCS_INITIAL_TOKENS";
constexpr char kRetryConfigInitialDelayTimeUs[] =
"GCS_RETRY_CONFIG_INIT_DELAY_TIME_US";
constexpr char kRetryConfigMaxDelayTimeUs[] =
"GCS_RETRY_CONFIG_MAX_DELAY_TIME_US";
constexpr char kRetryConfigMaxRetries[] = "GCS_RETRY_CONFIG_MAX_RETRIES";
constexpr char kAllowedBucketLocations[] = "GCS_ALLOWED_BUCKET_LOCATIONS";
constexpr char kDetectZoneSentinelValue[] = "auto";
constexpr char kAppendMode[] = "GCS_APPEND_MODE";
constexpr char kComposeAppend[] = "compose";
absl::Status GetTmpFilename(string* filename) {
*filename = io::GetTempFilename("");
return absl::OkStatus();
}
string MaybeAppendSlash(const string& name) {
if (name.empty()) {
return "/";
}
if (name.back() != '/') {
return strings::StrCat(name, "/");
}
return name;
}
string JoinGcsPath(const string& path, const string& subpath) {
return strings::StrCat(MaybeAppendSlash(path), subpath);
}
std::set<string> AddAllSubpaths(const std::vector<string>& paths) {
std::set<string> result;
result.insert(paths.begin(), paths.end());
for (const string& path : paths) {
absl::string_view subpath = io::Dirname(path);
while (!(subpath.empty() || subpath == "/")) {
result.emplace(string(subpath));
subpath = io::Dirname(subpath);
}
}
return result;
}
absl::Status ParseJson(absl::string_view json, Json::Value* result) {
Json::Reader reader;
if (!reader.parse(json.data(), json.data() + json.size(), *result)) {
return errors::Internal("Couldn't parse JSON response from GCS.");
}
return absl::OkStatus();
}
absl::Status ParseJson(const std::vector<char>& json, Json::Value* result) {
return ParseJson(absl::string_view{json.data(), json.size()}, result);
}
absl::Status GetValue(const Json::Value& parent, const char* name,
Json::Value* result) {
*result = parent.get(name, Json::Value::null);
if (result->isNull()) {
return errors::Internal("The field '", name,
"' was expected in the JSON response.");
}
return absl::OkStatus();
}
absl::Status GetStringValue(const Json::Value& parent, const char* name,
string* result) {
Json::Value result_value;
TF_RETURN_IF_ERROR(GetValue(parent, name, &result_value));
if (!result_value.isString()) {
return errors::Internal(
"The field '", name,
"' in the JSON response was expected to be a string.");
}
*result = result_value.asString();
return absl::OkStatus();
}
absl::Status GetInt64Value(const Json::Value& parent, const char* name,
int64_t* result) {
Json::Value result_value;
TF_RETURN_IF_ERROR(GetValue(parent, name, &result_value));
if (result_value.isNumeric()) {
*result = result_value.asInt64();
return absl::OkStatus();
}
if (result_value.isString() &&
strings::safe_strto64(result_value.asCString(), result)) {
return absl::OkStatus();
}
return errors::Internal(
"The field '", name,
"' in the JSON response was expected to be a number.");
}
absl::Status GetBoolValue(const Json::Value& parent, const char* name,
bool* result) {
Json::Value result_value;
TF_RETURN_IF_ERROR(GetValue(parent, name, &result_value));
if (!result_value.isBool()) {
return errors::Internal(
"The field '", name,
"' in the JSON response was expected to be a boolean.");
}
*result = result_value.asBool();
return absl::OkStatus();
}
RetryConfig GetGcsRetryConfig() {
RetryConfig retryConfig(
1000 * 1000,
32 * 1000 * 1000,
10);
uint64 init_delay_time_us;
if (GetEnvVar(kRetryConfigInitialDelayTimeUs, strings::safe_strtou64,
&init_delay_time_us)) {
retryConfig.init_delay_time_us = init_delay_time_us;
}
uint64 max_delay_time_us;
if (GetEnvVar(kRetryConfigMaxDelayTimeUs, strings::safe_strtou64,
&max_delay_time_us)) {
retryConfig.max_delay_time_us = max_delay_time_us;
}
uint32 max_retries;
if (GetEnvVar(kRetryConfigMaxRetries, strings::safe_strtou32, &max_retries)) {
retryConfig.max_retries = max_retries;
}
VLOG(1) << "GCS RetryConfig: "
<< "init_delay_time_us = " << retryConfig.init_delay_time_us << " ; "
<< "max_delay_time_us = " << retryConfig.max_delay_time_us << " ; "
<< "max_retries = " << retryConfig.max_retries;
return retryConfig;
}
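// A thin RandomAccessFile that delegates every Read() to an injected
// callback; the callback captures the caching/transport policy chosen by
// GcsFileSystem at construction time.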
class GcsRandomAccessFile : public RandomAccessFile {
public:
using ReadFn = std::function<absl::Status(
const string& filename, uint64 offset, size_t n,
absl::string_view* result, char* scratch)>;
GcsRandomAccessFile(const string& filename, ReadFn read_fn)
: filename_(filename), read_fn_(std::move(read_fn)) {}
absl::Status Name(absl::string_view* result) const override {
*result = filename_;
return absl::OkStatus();
}
absl::Status Read(uint64 offset, size_t n, absl::string_view* result,
char* scratch) const override {
return read_fn_(filename_, offset, n, result, scratch);
}
private:
const string filename_;
const ReadFn read_fn_;
};
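// A RandomAccessFile that fronts GCS reads with a single in-memory buffer.
// Requests larger than the buffer bypass it entirely; smaller requests are
// served from the buffer and trigger a refill when they run past its end.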
class BufferedGcsRandomAccessFile : public RandomAccessFile {
public:
using ReadFn = std::function<absl::Status(
const string& filename, uint64 offset, size_t n,
absl::string_view* result, char* scratch)>;
BufferedGcsRandomAccessFile(const string& filename, uint64 buffer_size,
ReadFn read_fn)
: filename_(filename),
read_fn_(std::move(read_fn)),
buffer_size_(buffer_size),
buffer_start_(0),
buffer_end_is_past_eof_(false) {}
absl::Status Name(absl::string_view* result) const override {
*result = filename_;
return absl::OkStatus();
}
absl::Status Read(uint64 offset, size_t n, absl::string_view* result,
char* scratch) const override {
if (n > buffer_size_) {
return read_fn_(filename_, offset, n, result, scratch);
}
{
mutex_lock l(buffer_mutex_);
size_t buffer_end = buffer_start_ + buffer_.size();
size_t copy_size = 0;
if (offset < buffer_end && offset >= buffer_start_) {
copy_size = std::min(n, static_cast<size_t>(buffer_end - offset));
memcpy(scratch, buffer_.data() + (offset - buffer_start_), copy_size);
*result = absl::string_view(scratch, copy_size);
}
bool consumed_buffer_to_eof =
offset + copy_size >= buffer_end && buffer_end_is_past_eof_;
if (copy_size < n && !consumed_buffer_to_eof) {
absl::Status status = FillBuffer(offset + copy_size);
if (!status.ok() && !absl::IsOutOfRange(status)) {
buffer_.resize(0);
return status;
}
size_t remaining_copy = std::min(n - copy_size, buffer_.size());
memcpy(scratch + copy_size, buffer_.data(), remaining_copy);
copy_size += remaining_copy;
*result = absl::string_view(scratch, copy_size);
}
if (copy_size < n) {
buffer_end_is_past_eof_ = false;
return errors::OutOfRange("EOF reached. Requested to read ", n,
" bytes from ", offset, ".");
}
}
return absl::OkStatus();
}
private:
absl::Status FillBuffer(uint64 start) const
TF_EXCLUSIVE_LOCKS_REQUIRED(buffer_mutex_) {
buffer_start_ = start;
buffer_.resize(buffer_size_);
absl::string_view str_piece;
absl::Status status = read_fn_(filename_, buffer_start_, buffer_size_,
&str_piece, &(buffer_[0]));
buffer_end_is_past_eof_ = absl::IsOutOfRange(status);
buffer_.resize(str_piece.size());
return status;
}
const string filename_;
const ReadFn read_fn_;
const uint64 buffer_size_;
mutable mutex buffer_mutex_;
mutable uint64 buffer_start_ TF_GUARDED_BY(buffer_mutex_);
mutable bool buffer_end_is_past_eof_ TF_GUARDED_BY(buffer_mutex_);
mutable string buffer_ TF_GUARDED_BY(buffer_mutex_);
};
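// The upload pipeline is expressed as injected callbacks (session creation,
// chunk upload, status polling, generation lookup) so GcsWritableFile can be
// exercised without issuing real HTTP traffic.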
typedef std::function<absl::Status(
uint64 start_offset, const std::string& object_to_upload,
const std::string& bucket, uint64 file_size, const std::string& gcs_path,
UploadSessionHandle* session_handle)>
SessionCreator;
typedef std::function<absl::Status(
const std::string& session_uri, uint64 start_offset,
uint64 already_uploaded, const std::string& tmp_content_filename,
uint64 file_size, const std::string& file_path)>
ObjectUploader;
typedef std::function<absl::Status(const string& session_uri, uint64 file_size,
const std::string& gcs_path, bool* completed,
uint64* uploaded)>
StatusPoller;
typedef std::function<absl::Status(const string& fname, const string& bucket,
const string& object, int64_t* generation)>
GenerationGetter;
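// Buffers all writes in a local temporary file and uploads it to GCS on
// Close()/Flush()/Sync() via the resumable upload protocol. In compose-append
// mode, each Sync() uploads only the new suffix as a temporary object and
// composes it onto the existing one.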
class GcsWritableFile : public WritableFile {
public:
GcsWritableFile(const string& bucket, const string& object,
GcsFileSystem* filesystem,
GcsFileSystem::TimeoutConfig* timeouts,
std::function<void()> file_cache_erase,
RetryConfig retry_config, bool compose_append,
SessionCreator session_creator,
ObjectUploader object_uploader, StatusPoller status_poller,
GenerationGetter generation_getter)
: bucket_(bucket),
object_(object),
filesystem_(filesystem),
timeouts_(timeouts),
file_cache_erase_(std::move(file_cache_erase)),
sync_needed_(true),
retry_config_(retry_config),
compose_append_(compose_append),
start_offset_(0),
session_creator_(std::move(session_creator)),
object_uploader_(std::move(object_uploader)),
status_poller_(std::move(status_poller)),
generation_getter_(std::move(generation_getter)) {
VLOG(3) << "GcsWritableFile: " << GetGcsPath();
if (GetTmpFilename(&tmp_content_filename_).ok()) {
outfile_.open(tmp_content_filename_,
std::ofstream::binary | std::ofstream::app);
}
}
GcsWritableFile(const string& bucket, const string& object,
GcsFileSystem* filesystem, const string& tmp_content_filename,
GcsFileSystem::TimeoutConfig* timeouts,
std::function<void()> file_cache_erase,
RetryConfig retry_config, bool compose_append,
SessionCreator session_creator,
ObjectUploader object_uploader, StatusPoller status_poller,
GenerationGetter generation_getter)
: bucket_(bucket),
object_(object),
filesystem_(filesystem),
timeouts_(timeouts),
file_cache_erase_(std::move(file_cache_erase)),
sync_needed_(true),
retry_config_(retry_config),
compose_append_(compose_append),
start_offset_(0),
session_creator_(std::move(session_creator)),
object_uploader_(std::move(object_uploader)),
status_poller_(std::move(status_poller)),
generation_getter_(std::move(generation_getter)) {
VLOG(3) << "GcsWritableFile: " << GetGcsPath() << "with existing file "
<< tmp_content_filename;
tmp_content_filename_ = tmp_content_filename;
outfile_.open(tmp_content_filename_,
std::ofstream::binary | std::ofstream::app);
}
~GcsWritableFile() override {
Close().IgnoreError();
std::remove(tmp_content_filename_.c_str());
}
absl::Status Append(absl::string_view data) override {
TF_RETURN_IF_ERROR(CheckWritable());
VLOG(3) << "Append: " << GetGcsPath() << " size " << data.length();
sync_needed_ = true;
outfile_ << data;
if (!outfile_.good()) {
return errors::Internal(
"Could not append to the internal temporary file.");
}
return absl::OkStatus();
}
absl::Status Close() override {
VLOG(3) << "Close:" << GetGcsPath();
if (outfile_.is_open()) {
absl::Status sync_status = Sync();
if (sync_status.ok()) {
outfile_.close();
}
return sync_status;
}
return absl::OkStatus();
}
absl::Status Flush() override {
VLOG(3) << "Flush:" << GetGcsPath();
return Sync();
}
absl::Status Name(absl::string_view* result) const override {
*result = object_;
return absl::OkStatus();
}
absl::Status Sync() override {
VLOG(3) << "Sync started:" << GetGcsPath();
TF_RETURN_IF_ERROR(CheckWritable());
if (!sync_needed_) {
return absl::OkStatus();
}
absl::Status status = SyncImpl();
VLOG(3) << "Sync finished " << GetGcsPath();
if (status.ok()) {
sync_needed_ = false;
}
return status;
}
absl::Status Tell(int64_t* position) override {
*position = outfile_.tellp();
if (*position == -1) {
return errors::Internal("tellp on the internal temporary file failed");
}
return absl::OkStatus();
}
private:
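  // Uploads the temporary file to GCS. Interrupted resumable uploads are
  // retried from the last byte the server acknowledged; in compose-append
  // mode a partial object is uploaded and then composed onto the target.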
absl::Status SyncImpl() {
outfile_.flush();
if (!outfile_.good()) {
return errors::Internal(
"Could not write to the internal temporary file.");
}
UploadSessionHandle session_handle;
uint64 start_offset = 0;
string object_to_upload = object_;
bool should_compose = false;
if (compose_append_) {
start_offset = start_offset_;
should_compose = start_offset > 0;
if (should_compose) {
object_to_upload =
strings::StrCat(io::Dirname(object_), "/.tmpcompose/",
io::Basename(object_), ".", start_offset_);
}
}
TF_RETURN_IF_ERROR(CreateNewUploadSession(start_offset, object_to_upload,
&session_handle));
uint64 already_uploaded = 0;
bool first_attempt = true;
const absl::Status upload_status = RetryingUtils::CallWithRetries(
[&first_attempt, &already_uploaded, &session_handle, &start_offset,
this]() {
if (session_handle.resumable && !first_attempt) {
bool completed;
TF_RETURN_IF_ERROR(RequestUploadSessionStatus(
session_handle.session_uri, &completed, &already_uploaded));
LOG(INFO) << "### RequestUploadSessionStatus: completed = "
<< completed
<< ", already_uploaded = " << already_uploaded
<< ", file = " << GetGcsPath();
if (completed) {
file_cache_erase_();
return absl::OkStatus();
}
}
first_attempt = false;
return UploadToSession(session_handle.session_uri, start_offset,
already_uploaded);
},
retry_config_);
if (absl::IsNotFound(upload_status)) {
return errors::Unavailable(
strings::StrCat("Upload to gs:
" failed, caused by: ", upload_status.message()));
}
if (upload_status.ok()) {
if (should_compose) {
TF_RETURN_IF_ERROR(AppendObject(object_to_upload));
}
TF_RETURN_IF_ERROR(GetCurrentFileSize(&start_offset_));
}
return upload_status;
}
absl::Status CheckWritable() const {
if (!outfile_.is_open()) {
return errors::FailedPrecondition(
"The internal temporary file is not writable.");
}
return absl::OkStatus();
}
absl::Status GetCurrentFileSize(uint64* size) {
const auto tellp = outfile_.tellp();
if (tellp == static_cast<std::streampos>(-1)) {
return errors::Internal(
"Could not get the size of the internal temporary file.");
}
*size = tellp;
return absl::OkStatus();
}
absl::Status CreateNewUploadSession(uint64 start_offset,
std::string object_to_upload,
UploadSessionHandle* session_handle) {
uint64 file_size;
TF_RETURN_IF_ERROR(GetCurrentFileSize(&file_size));
return session_creator_(start_offset, object_to_upload, bucket_, file_size,
GetGcsPath(), session_handle);
}
absl::Status AppendObject(string append_object) {
const string append_object_path = GetGcsPathWithObject(append_object);
VLOG(3) << "AppendObject: " << append_object_path << " to " << GetGcsPath();
int64_t generation = 0;
TF_RETURN_IF_ERROR(
generation_getter_(GetGcsPath(), bucket_, object_, &generation));
TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries(
[&append_object, &generation, this]() {
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(filesystem_->CreateHttpRequest(&request));
request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket_, "/o/",
request->EscapeString(object_),
"/compose"));
const string request_body = strings::StrCat(
"{'sourceObjects': [{'name': '", object_,
"','objectPrecondition':{'ifGenerationMatch':", generation,
"}},{'name': '", append_object, "'}]}");
request->SetTimeouts(timeouts_->connect, timeouts_->idle,
timeouts_->metadata);
request->AddHeader("content-type", "application/json");
request->SetPostFromBuffer(request_body.c_str(), request_body.size());
TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(),
" when composing to ", GetGcsPath());
return absl::OkStatus();
},
retry_config_));
return RetryingUtils::DeleteWithRetries(
[&append_object_path, this]() {
return filesystem_->DeleteFile(append_object_path, nullptr);
},
retry_config_);
}
absl::Status RequestUploadSessionStatus(const string& session_uri,
bool* completed, uint64* uploaded) {
uint64 file_size;
TF_RETURN_IF_ERROR(GetCurrentFileSize(&file_size));
return status_poller_(session_uri, file_size, GetGcsPath(), completed,
uploaded);
}
absl::Status UploadToSession(const string& session_uri, uint64 start_offset,
uint64 already_uploaded) {
uint64 file_size;
TF_RETURN_IF_ERROR(GetCurrentFileSize(&file_size));
absl::Status status =
object_uploader_(session_uri, start_offset, already_uploaded,
tmp_content_filename_, file_size, GetGcsPath());
if (status.ok()) {
file_cache_erase_();
}
return status;
}
string GetGcsPathWithObject(string object) const {
return strings::StrCat("gs:
}
string GetGcsPath() const { return GetGcsPathWithObject(object_); }
string bucket_;
string object_;
GcsFileSystem* const filesystem_;
string tmp_content_filename_;
std::ofstream outfile_;
GcsFileSystem::TimeoutConfig* timeouts_;
std::function<void()> file_cache_erase_;
bool sync_needed_;
RetryConfig retry_config_ = GetGcsRetryConfig();
bool compose_append_;
uint64 start_offset_;
const SessionCreator session_creator_;
const ObjectUploader object_uploader_;
const StatusPoller status_poller_;
const GenerationGetter generation_getter_;
};
class GcsReadOnlyMemoryRegion : public ReadOnlyMemoryRegion {
public:
GcsReadOnlyMemoryRegion(std::unique_ptr<char[]> data, uint64 length)
: data_(std::move(data)), length_(length) {}
const void* data() override { return reinterpret_cast<void*>(data_.get()); }
uint64 length() override { return length_; }
private:
std::unique_ptr<char[]> data_;
uint64 length_;
};
bool StringPieceIdentity(absl::string_view str, absl::string_view* value) {
*value = str;
return true;
}
bool SplitByCommaToLowercaseSet(absl::string_view list,
std::unordered_set<string>* set) {
std::vector<string> vector = absl::StrSplit(absl::AsciiStrToLower(list), ',');
*set = std::unordered_set<string>(vector.begin(), vector.end());
return true;
}
string ZoneToRegion(string* zone) {
return zone->substr(0, zone->find_last_of('-'));
}
}
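// Constructs the filesystem with defaults that can be overridden through
// environment variables: block cache geometry, stat/matching-path cache
// limits, DNS caching, request timeouts, throttling, retries, allowed bucket
// locations, and the append mode.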
GcsFileSystem::GcsFileSystem(bool make_default_cache) {
uint64 value;
block_size_ = kDefaultBlockSize;
size_t max_bytes = kDefaultMaxCacheSize;
uint64 max_staleness = kDefaultMaxStaleness;
http_request_factory_ = std::make_shared<CurlHttpRequest::Factory>();
compute_engine_metadata_client_ =
std::make_shared<ComputeEngineMetadataClient>(http_request_factory_);
auth_provider_ = std::unique_ptr<AuthProvider>(
new GoogleAuthProvider(compute_engine_metadata_client_));
zone_provider_ = std::unique_ptr<ZoneProvider>(
new ComputeEngineZoneProvider(compute_engine_metadata_client_));
if (GetEnvVar(kReadaheadBufferSize, strings::safe_strtou64, &value)) {
block_size_ = value;
}
if (GetEnvVar(kBlockSize, strings::safe_strtou64, &value)) {
block_size_ = value * 1024 * 1024;
}
if (GetEnvVar(kMaxCacheSize, strings::safe_strtou64, &value)) {
max_bytes = value * 1024 * 1024;
}
if (GetEnvVar(kMaxStaleness, strings::safe_strtou64, &value)) {
max_staleness = value;
}
if (!make_default_cache) {
max_bytes = 0;
}
VLOG(1) << "GCS cache max size = " << max_bytes << " ; "
<< "block size = " << block_size_ << " ; "
<< "max staleness = " << max_staleness;
file_block_cache_ = MakeFileBlockCache(block_size_, max_bytes, max_staleness);
uint64 stat_cache_max_age = kStatCacheDefaultMaxAge;
size_t stat_cache_max_entries = kStatCacheDefaultMaxEntries;
if (GetEnvVar(kStatCacheMaxAge, strings::safe_strtou64, &value)) {
stat_cache_max_age = value;
}
if (GetEnvVar(kStatCacheMaxEntries, strings::safe_strtou64, &value)) {
stat_cache_max_entries = value;
}
stat_cache_.reset(new ExpiringLRUCache<GcsFileStat>(stat_cache_max_age,
stat_cache_max_entries));
uint64 matching_paths_cache_max_age = kMatchingPathsCacheDefaultMaxAge;
size_t matching_paths_cache_max_entries =
kMatchingPathsCacheDefaultMaxEntries;
if (GetEnvVar(kMatchingPathsCacheMaxAge, strings::safe_strtou64, &value)) {
matching_paths_cache_max_age = value;
}
if (GetEnvVar(kMatchingPathsCacheMaxEntries, strings::safe_strtou64,
&value)) {
matching_paths_cache_max_entries = value;
}
matching_paths_cache_.reset(new ExpiringLRUCache<std::vector<string>>(
matching_paths_cache_max_age, matching_paths_cache_max_entries));
bucket_location_cache_.reset(new ExpiringLRUCache<string>(
kCacheNeverExpire, kBucketLocationCacheMaxEntries));
int64_t resolve_frequency_secs;
if (GetEnvVar(kResolveCacheSecs, strings::safe_strto64,
&resolve_frequency_secs)) {
dns_cache_.reset(new GcsDnsCache(resolve_frequency_secs));
VLOG(1) << "GCS DNS cache is enabled. " << kResolveCacheSecs << " = "
<< resolve_frequency_secs;
} else {
VLOG(1) << "GCS DNS cache is disabled, because " << kResolveCacheSecs
<< " = 0 (or is not set)";
}
absl::string_view add_header_contents;
if (GetEnvVar(kAdditionalRequestHeader, StringPieceIdentity,
&add_header_contents)) {
size_t split = add_header_contents.find(':', 0);
if (split != absl::string_view::npos) {
absl::string_view header_name = add_header_contents.substr(0, split);
absl::string_view header_value = add_header_contents.substr(split + 1);
if (!header_name.empty() && !header_value.empty()) {
additional_header_.reset(new std::pair<const string, const string>(
string(header_name), string(header_value)));
VLOG(1) << "GCS additional header ENABLED. "
<< "Name: " << additional_header_->first << ", "
<< "Value: " << additional_header_->second;
} else {
LOG(ERROR) << "GCS additional header DISABLED. Invalid contents: "
<< add_header_contents;
}
} else {
LOG(ERROR) << "GCS additional header DISABLED. Invalid contents: "
<< add_header_contents;
}
} else {
VLOG(1) << "GCS additional header DISABLED. No environment variable set.";
}
uint32 timeout_value;
if (GetEnvVar(kRequestConnectionTimeout, strings::safe_strtou32,
&timeout_value)) {
timeouts_.connect = timeout_value;
}
if (GetEnvVar(kRequestIdleTimeout, strings::safe_strtou32, &timeout_value)) {
timeouts_.idle = timeout_value;
}
if (GetEnvVar(kMetadataRequestTimeout, strings::safe_strtou32,
&timeout_value)) {
timeouts_.metadata = timeout_value;
}
if (GetEnvVar(kReadRequestTimeout, strings::safe_strtou32, &timeout_value)) {
timeouts_.read = timeout_value;
}
if (GetEnvVar(kWriteRequestTimeout, strings::safe_strtou32, &timeout_value)) {
timeouts_.write = timeout_value;
}
int64_t token_value;
if (GetEnvVar(kThrottleRate, strings::safe_strto64, &token_value)) {
GcsThrottleConfig config;
config.enabled = true;
config.token_rate = token_value;
if (GetEnvVar(kThrottleBucket, strings::safe_strto64, &token_value)) {
config.bucket_size = token_value;
}
if (GetEnvVar(kTokensPerRequest, strings::safe_strto64, &token_value)) {
config.tokens_per_request = token_value;
}
if (GetEnvVar(kInitialTokens, strings::safe_strto64, &token_value)) {
config.initial_tokens = token_value;
}
throttle_.SetConfig(config);
}
GetEnvVar(kAllowedBucketLocations, SplitByCommaToLowercaseSet,
&allowed_locations_);
absl::string_view append_mode;
GetEnvVar(kAppendMode, StringPieceIdentity, &append_mode);
if (append_mode == kComposeAppend) {
compose_append_ = true;
} else {
compose_append_ = false;
}
retry_config_ = GetGcsRetryConfig();
}
GcsFileSystem::GcsFileSystem(
std::unique_ptr<AuthProvider> auth_provider,
std::unique_ptr<HttpRequest::Factory> http_request_factory,
std::unique_ptr<ZoneProvider> zone_provider, size_t block_size,
size_t max_bytes, uint64 max_staleness, uint64 stat_cache_max_age,
size_t stat_cache_max_entries, uint64 matching_paths_cache_max_age,
size_t matching_paths_cache_max_entries, RetryConfig retry_config,
TimeoutConfig timeouts, const std::unordered_set<string>& allowed_locations,
std::pair<const string, const string>* additional_header,
bool compose_append)
: timeouts_(timeouts),
retry_config_(retry_config),
auth_provider_(std::move(auth_provider)),
http_request_factory_(std::move(http_request_factory)),
zone_provider_(std::move(zone_provider)),
block_size_(block_size),
file_block_cache_(
MakeFileBlockCache(block_size, max_bytes, max_staleness)),
stat_cache_(new StatCache(stat_cache_max_age, stat_cache_max_entries)),
matching_paths_cache_(new MatchingPathsCache(
matching_paths_cache_max_age, matching_paths_cache_max_entries)),
bucket_location_cache_(new BucketLocationCache(
kCacheNeverExpire, kBucketLocationCacheMaxEntries)),
allowed_locations_(allowed_locations),
compose_append_(compose_append),
additional_header_(additional_header) {}
absl::Status GcsFileSystem::NewRandomAccessFile(
const string& fname, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) {
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object));
TF_RETURN_IF_ERROR(CheckBucketLocationConstraint(bucket));
if (cache_enabled_) {
result->reset(new GcsRandomAccessFile(fname, [this, bucket, object](
const string& fname,
uint64 offset, size_t n,
absl::string_view* result,
char* scratch) {
tf_shared_lock l(block_cache_lock_);
GcsFileStat stat;
TF_RETURN_IF_ERROR(stat_cache_->LookupOrCompute(
fname, &stat,
[this, bucket, object](const string& fname, GcsFileStat* stat) {
return UncachedStatForObject(fname, bucket, object, stat);
}));
if (!file_block_cache_->ValidateAndUpdateFileSignature(
fname, stat.generation_number)) {
VLOG(1)
<< "File signature has been changed. Refreshing the cache. Path: "
<< fname;
}
*result = absl::string_view();
size_t bytes_transferred;
TF_RETURN_IF_ERROR(file_block_cache_->Read(fname, offset, n, scratch,
&bytes_transferred));
*result = absl::string_view(scratch, bytes_transferred);
if (bytes_transferred < n) {
return errors::OutOfRange("EOF reached, ", result->size(),
" bytes were read out of ", n,
" bytes requested.");
}
return absl::OkStatus();
}));
} else {
result->reset(new BufferedGcsRandomAccessFile(
fname, block_size_,
[this, bucket, object](const string& fname, uint64 offset, size_t n,
absl::string_view* result, char* scratch) {
*result = absl::string_view();
size_t bytes_transferred;
TF_RETURN_IF_ERROR(
LoadBufferFromGCS(fname, offset, n, scratch, &bytes_transferred));
*result = absl::string_view(scratch, bytes_transferred);
if (bytes_transferred < n) {
return errors::OutOfRange("EOF reached, ", result->size(),
" bytes were read out of ", n,
" bytes requested.");
}
return absl::OkStatus();
}));
}
return absl::OkStatus();
}
void GcsFileSystem::ResetFileBlockCache(size_t block_size_bytes,
size_t max_bytes,
uint64 max_staleness_secs) {
mutex_lock l(block_cache_lock_);
file_block_cache_ =
MakeFileBlockCache(block_size_bytes, max_bytes, max_staleness_secs);
if (stats_ != nullptr) {
stats_->Configure(this, &throttle_, file_block_cache_.get());
}
}
std::unique_ptr<FileBlockCache> GcsFileSystem::MakeFileBlockCache(
size_t block_size, size_t max_bytes, uint64 max_staleness) {
std::unique_ptr<FileBlockCache> file_block_cache(new RamFileBlockCache(
block_size, max_bytes, max_staleness,
[this](const string& filename, size_t offset, size_t n, char* buffer,
size_t* bytes_transferred) {
return LoadBufferFromGCS(filename, offset, n, buffer,
bytes_transferred);
}));
cache_enabled_ = file_block_cache->IsCacheEnabled();
return file_block_cache;
}
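// Fetches a single block [offset, offset + n) of a GCS object with an HTTP
// Range request directly into the caller's buffer; used as the block fetcher
// for the file block cache.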
absl::Status GcsFileSystem::LoadBufferFromGCS(const string& fname,
size_t offset, size_t n,
char* buffer,
size_t* bytes_transferred) {
*bytes_transferred = 0;
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object));
profiler::TraceMe activity(
[fname]() { return absl::StrCat("LoadBufferFromGCS ", fname); });
std::unique_ptr<HttpRequest> request;
TF_RETURN_WITH_CONTEXT_IF_ERROR(CreateHttpRequest(&request),
"when reading gs:
request->SetUri(strings::StrCat("https:
request->EscapeString(object)));
request->SetRange(offset, offset + n - 1);
request->SetResultBufferDirect(buffer, n);
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.read);
if (stats_ != nullptr) {
stats_->RecordBlockLoadRequest(fname, offset);
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when reading gs:
bucket, "/", object);
size_t bytes_read = request->GetResultBufferDirectBytesTransferred();
*bytes_transferred = bytes_read;
VLOG(1) << "Successful read of gs:
<< offset << " of size: " << bytes_read;
activity.AppendMetadata([bytes_read]() {
return profiler::TraceMeEncode({{"block_size", bytes_read}});
});
if (stats_ != nullptr) {
stats_->RecordBlockRetrieved(fname, offset, bytes_read);
}
throttle_.RecordResponse(bytes_read);
if (bytes_read < n) {
GcsFileStat stat;
if (stat_cache_->Lookup(fname, &stat)) {
if (offset + bytes_read < stat.base.length) {
return errors::Internal(strings::Printf(
"File contents are inconsistent for file: %s @ %lu.", fname.c_str(),
offset));
}
VLOG(2) << "Successful integrity check for: gs:
<< object << " @ " << offset;
}
}
return absl::OkStatus();
}
absl::Status GcsFileSystem::CreateNewUploadSession(
uint64 start_offset, const std::string& object_to_upload,
const std::string& bucket, uint64 file_size, const std::string& gcs_path,
UploadSessionHandle* session_handle) {
std::vector<char> output_buffer;
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
std::string uri = strings::StrCat(
kGcsUploadUriBase, "b/", bucket,
"/o?uploadType=resumable&name=", request->EscapeString(object_to_upload));
request->SetUri(uri);
request->AddHeader("X-Upload-Content-Length",
absl::StrCat(file_size - start_offset));
request->SetPostEmptyBody();
request->SetResultBuffer(&output_buffer);
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(),
" when initiating an upload to ", gcs_path);
if (session_handle != nullptr) {
session_handle->resumable = true;
session_handle->session_uri = request->GetResponseHeader("Location");
if (session_handle->session_uri.empty()) {
return errors::Internal("Unexpected response from GCS when writing to ",
gcs_path, ": 'Location' header not returned.");
}
}
return absl::OkStatus();
}
absl::Status GcsFileSystem::UploadToSession(
const std::string& session_uri, uint64 start_offset,
uint64 already_uploaded, const std::string& tmp_content_filename,
uint64 file_size, const std::string& file_path) {
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
request->SetUri(session_uri);
if (file_size > 0) {
request->AddHeader("Content-Range",
strings::StrCat("bytes ", already_uploaded, "-",
file_size - start_offset - 1, "/",
file_size - start_offset));
}
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.write);
TF_RETURN_IF_ERROR(request->SetPutFromFile(tmp_content_filename,
start_offset + already_uploaded));
TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when uploading ",
file_path);
return absl::OkStatus();
}
absl::Status GcsFileSystem::RequestUploadSessionStatus(
const string& session_uri, uint64 file_size, const std::string& gcs_path,
bool* completed, uint64* uploaded) {
CHECK(completed != nullptr) << "RequestUploadSessionStatus() called with out "
"param 'completed' == nullptr.";
CHECK(uploaded != nullptr) << "RequestUploadSessionStatus() called with out "
"param 'uploaded' == nullptr.";
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
request->SetUri(session_uri);
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
request->AddHeader("Content-Range", strings::StrCat("bytes */", file_size));
request->SetPutEmptyBody();
absl::Status status = request->Send();
if (status.ok()) {
*completed = true;
return absl::OkStatus();
}
*completed = false;
if (request->GetResponseCode() != HTTP_CODE_RESUME_INCOMPLETE) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(status, " when resuming upload ", gcs_path);
}
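  // Upload is incomplete: recover how many bytes the server has persisted
  // from the "Range: bytes=0-N" response header. An absent header means no
  // bytes were committed.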
const std::string received_range = request->GetResponseHeader("Range");
if (received_range.empty()) {
*uploaded = 0;
} else {
absl::string_view range_piece(received_range);
absl::ConsumePrefix(&range_piece,
"bytes=");
auto return_error = [](const std::string& gcs_path,
const std::string& error_message) {
return errors::Internal("Unexpected response from GCS when writing ",
gcs_path, ": ", error_message);
};
std::vector<string> range_strs = str_util::Split(range_piece, '-');
if (range_strs.size() != 2) {
return return_error(gcs_path, "Range header '" + received_range +
"' could not be parsed.");
}
std::vector<int64_t> range_parts;
for (const std::string& range_str : range_strs) {
int64_t tmp;
if (strings::safe_strto64(range_str, &tmp)) {
range_parts.push_back(tmp);
} else {
return return_error(gcs_path, "Range header '" + received_range +
"' could not be parsed.");
}
}
if (range_parts[0] != 0) {
return return_error(gcs_path, "The returned range '" + received_range +
"' does not start at zero.");
}
*uploaded = range_parts[1] + 1;
}
return absl::OkStatus();
}
absl::Status GcsFileSystem::ParseGcsPathForScheme(absl::string_view fname,
string scheme,
bool empty_object_ok,
string* bucket,
string* object) {
absl::string_view parsed_scheme, bucketp, objectp;
io::ParseURI(fname, &parsed_scheme, &bucketp, &objectp);
if (parsed_scheme != scheme) {
return errors::InvalidArgument("GCS path doesn't start with 'gs:
fname);
}
*bucket = string(bucketp);
if (bucket->empty() || *bucket == ".") {
return errors::InvalidArgument("GCS path doesn't contain a bucket name: ",
fname);
}
absl::ConsumePrefix(&objectp, "/");
*object = string(objectp);
if (!empty_object_ok && object->empty()) {
return errors::InvalidArgument("GCS path doesn't contain an object name: ",
fname);
}
return absl::OkStatus();
}
absl::Status GcsFileSystem::ParseGcsPath(absl::string_view fname,
bool empty_object_ok, string* bucket,
string* object) {
return ParseGcsPathForScheme(fname, "gs", empty_object_ok, bucket, object);
}
void GcsFileSystem::ClearFileCaches(const string& fname) {
tf_shared_lock l(block_cache_lock_);
file_block_cache_->RemoveFile(fname);
stat_cache_->Delete(fname);
}
absl::Status GcsFileSystem::NewWritableFile(
const string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object));
auto session_creator =
[this](uint64 start_offset, const std::string& object_to_upload,
const std::string& bucket, uint64 file_size,
const std::string& gcs_path, UploadSessionHandle* session_handle) {
return CreateNewUploadSession(start_offset, object_to_upload, bucket,
file_size, gcs_path, session_handle);
};
auto object_uploader =
[this](const std::string& session_uri, uint64 start_offset,
uint64 already_uploaded, const std::string& tmp_content_filename,
uint64 file_size, const std::string& file_path) {
return UploadToSession(session_uri, start_offset, already_uploaded,
tmp_content_filename, file_size, file_path);
};
auto status_poller = [this](const string& session_uri, uint64 file_size,
const std::string& gcs_path, bool* completed,
uint64* uploaded) {
return RequestUploadSessionStatus(session_uri, file_size, gcs_path,
completed, uploaded);
};
auto generation_getter = [this](const string& fname, const string& bucket,
const string& object, int64* generation) {
GcsFileStat stat;
TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries(
[&fname, &bucket, &object, &stat, this]() {
return UncachedStatForObject(fname, bucket, object, &stat);
},
retry_config_));
*generation = stat.generation_number;
return absl::OkStatus();
};
result->reset(new GcsWritableFile(
bucket, object, this, &timeouts_,
[this, fname]() { ClearFileCaches(fname); }, retry_config_,
compose_append_, session_creator, object_uploader, status_poller,
generation_getter));
return absl::OkStatus();
}
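// GCS objects are immutable, so appending requires first downloading any
// existing contents into the local temporary file that backs the writable
// file; subsequent Sync() calls re-upload (or compose) from there.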
absl::Status GcsFileSystem::NewAppendableFile(
const string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
std::unique_ptr<RandomAccessFile> reader;
TF_RETURN_IF_ERROR(NewRandomAccessFile(fname, token, &reader));
std::unique_ptr<char[]> buffer(new char[kReadAppendableFileBufferSize]);
absl::Status status;
uint64 offset = 0;
absl::string_view read_chunk;
string old_content_filename;
TF_RETURN_IF_ERROR(GetTmpFilename(&old_content_filename));
std::ofstream old_content(old_content_filename, std::ofstream::binary);
while (true) {
status = reader->Read(offset, kReadAppendableFileBufferSize, &read_chunk,
buffer.get());
if (status.ok()) {
old_content << read_chunk;
offset += kReadAppendableFileBufferSize;
} else if (status.code() == absl::StatusCode::kNotFound) {
break;
} else if (status.code() == absl::StatusCode::kOutOfRange) {
old_content << read_chunk;
break;
} else {
return status;
}
}
old_content.close();
auto session_creator =
[this](uint64 start_offset, const std::string& object_to_upload,
const std::string& bucket, uint64 file_size,
const std::string& gcs_path, UploadSessionHandle* session_handle) {
return CreateNewUploadSession(start_offset, object_to_upload, bucket,
file_size, gcs_path, session_handle);
};
auto object_uploader =
[this](const std::string& session_uri, uint64 start_offset,
uint64 already_uploaded, const std::string& tmp_content_filename,
uint64 file_size, const std::string& file_path) {
return UploadToSession(session_uri, start_offset, already_uploaded,
tmp_content_filename, file_size, file_path);
};
auto status_poller = [this](const string& session_uri, uint64 file_size,
const std::string& gcs_path, bool* completed,
uint64* uploaded) {
return RequestUploadSessionStatus(session_uri, file_size, gcs_path,
completed, uploaded);
};
auto generation_getter = [this](const string& fname, const string& bucket,
const string& object, int64* generation) {
GcsFileStat stat;
TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries(
[&fname, &bucket, &object, &stat, this]() {
return UncachedStatForObject(fname, bucket, object, &stat);
},
retry_config_));
*generation = stat.generation_number;
return absl::OkStatus();
};
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object));
result->reset(new GcsWritableFile(
bucket, object, this, old_content_filename, &timeouts_,
[this, fname]() { ClearFileCaches(fname); }, retry_config_,
compose_append_, session_creator, object_uploader, status_poller,
generation_getter));
return absl::OkStatus();
}
absl::Status GcsFileSystem::NewReadOnlyMemoryRegionFromFile(
const string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) {
uint64 size;
TF_RETURN_IF_ERROR(GetFileSize(fname, token, &size));
std::unique_ptr<char[]> data(new char[size]);
std::unique_ptr<RandomAccessFile> file;
TF_RETURN_IF_ERROR(NewRandomAccessFile(fname, token, &file));
absl::string_view piece;
TF_RETURN_IF_ERROR(file->Read(0, size, &piece, data.get()));
result->reset(new GcsReadOnlyMemoryRegion(std::move(data), size));
return absl::OkStatus();
}
absl::Status GcsFileSystem::FileExists(const string& fname,
TransactionToken* token) {
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, true, &bucket, &object));
if (object.empty()) {
bool result;
TF_RETURN_IF_ERROR(BucketExists(bucket, &result));
if (result) {
return absl::OkStatus();
} else {
return absl::NotFoundError(
absl::StrCat("The specified bucket ", fname, " was not found."));
}
}
GcsFileStat stat;
const absl::Status status = StatForObject(fname, bucket, object, &stat);
if (!absl::IsNotFound(status)) {
return status;
}
bool result;
TF_RETURN_IF_ERROR(FolderExists(fname, &result));
if (result) {
return absl::OkStatus();
}
return errors::NotFound("The specified path ", fname, " was not found.");
}
absl::Status GcsFileSystem::ObjectExists(const string& fname,
const string& bucket,
const string& object, bool* result) {
GcsFileStat stat;
const absl::Status status = StatForObject(fname, bucket, object, &stat);
switch (static_cast<int>(status.code())) {
case static_cast<int>(error::Code::OK):
*result = !stat.base.is_directory;
return absl::OkStatus();
case static_cast<int>(error::Code::NOT_FOUND):
*result = false;
return absl::OkStatus();
default:
return status;
}
}
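// Issues a metadata GET restricted to the size, generation, and updated
// fields, and derives directory-ness from a trailing slash in the name.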
absl::Status GcsFileSystem::UncachedStatForObject(const string& fname,
const string& bucket,
const string& object,
GcsFileStat* stat) {
std::vector<char> output_buffer;
std::unique_ptr<HttpRequest> request;
TF_RETURN_WITH_CONTEXT_IF_ERROR(CreateHttpRequest(&request),
" when reading metadata of gs:
"/", object);
request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket, "/o/",
request->EscapeString(object),
"?fields=size%2Cgeneration%2Cupdated"));
request->SetResultBuffer(&output_buffer);
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
if (stats_ != nullptr) {
stats_->RecordStatObjectRequest();
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
request->Send(), " when reading metadata of gs:
Json::Value root;
TF_RETURN_IF_ERROR(ParseJson(output_buffer, &root));
TF_RETURN_IF_ERROR(GetInt64Value(root, "size", &stat->base.length));
TF_RETURN_IF_ERROR(
GetInt64Value(root, "generation", &stat->generation_number));
string updated;
TF_RETURN_IF_ERROR(GetStringValue(root, "updated", &updated));
TF_RETURN_IF_ERROR(ParseRfc3339Time(updated, &(stat->base.mtime_nsec)));
VLOG(1) << "Stat of: gs:
<< " length: " << stat->base.length
<< " generation: " << stat->generation_number
<< "; mtime_nsec: " << stat->base.mtime_nsec
<< "; updated: " << updated;
if (absl::EndsWith(fname, "/")) {
stat->base.is_directory = true;
} else {
stat->base.is_directory = false;
}
return absl::OkStatus();
}
absl::Status GcsFileSystem::StatForObject(const string& fname,
const string& bucket,
const string& object,
GcsFileStat* stat) {
if (object.empty()) {
return errors::InvalidArgument(strings::Printf(
"'object' must be a non-empty string. (File: %s)", fname.c_str()));
}
TF_RETURN_IF_ERROR(stat_cache_->LookupOrCompute(
fname, stat,
[this, &bucket, &object](const string& fname, GcsFileStat* stat) {
return UncachedStatForObject(fname, bucket, object, stat);
}));
return absl::OkStatus();
}
absl::Status GcsFileSystem::BucketExists(const string& bucket, bool* result) {
const absl::Status status = GetBucketMetadata(bucket, nullptr);
switch (static_cast<absl::StatusCode>(status.code())) {
case absl::StatusCode::kOk:
*result = true;
return absl::OkStatus();
case absl::StatusCode::kNotFound:
*result = false;
return absl::OkStatus();
default:
return status;
}
}
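// Enforces GCS_ALLOWED_BUCKET_LOCATIONS: the sentinel value "auto" is
// replaced by the region of the VM's own zone before the bucket's location
// is checked against the allow-list.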
absl::Status GcsFileSystem::CheckBucketLocationConstraint(
const string& bucket) {
if (allowed_locations_.empty()) {
return absl::OkStatus();
}
if (allowed_locations_.erase(kDetectZoneSentinelValue) == 1) {
string zone;
TF_RETURN_IF_ERROR(zone_provider_->GetZone(&zone));
allowed_locations_.insert(ZoneToRegion(&zone));
}
string location;
TF_RETURN_IF_ERROR(GetBucketLocation(bucket, &location));
if (allowed_locations_.find(location) != allowed_locations_.end()) {
return absl::OkStatus();
}
return errors::FailedPrecondition(strings::Printf(
"Bucket '%s' is in '%s' location, allowed locations are: (%s).",
bucket.c_str(), location.c_str(),
absl::StrJoin(allowed_locations_, ", ").c_str()));
}
absl::Status GcsFileSystem::GetBucketLocation(const string& bucket,
string* location) {
auto compute_func = [this](const string& bucket, string* location) {
std::vector<char> result_buffer;
absl::Status status = GetBucketMetadata(bucket, &result_buffer);
Json::Value result;
TF_RETURN_IF_ERROR(ParseJson(result_buffer, &result));
string bucket_location;
TF_RETURN_IF_ERROR(
GetStringValue(result, kBucketMetadataLocationKey, &bucket_location));
*location = absl::AsciiStrToLower(bucket_location);
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
bucket_location_cache_->LookupOrCompute(bucket, location, compute_func));
return absl::OkStatus();
}
absl::Status GcsFileSystem::GetBucketMetadata(
const string& bucket, std::vector<char>* result_buffer) {
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket));
if (result_buffer != nullptr) {
request->SetResultBuffer(result_buffer);
}
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
return request->Send();
}
absl::Status GcsFileSystem::FolderExists(const string& dirname, bool* result) {
StatCache::ComputeFunc compute_func = [this](const string& dirname,
GcsFileStat* stat) {
std::vector<string> children;
TF_RETURN_IF_ERROR(
GetChildrenBounded(dirname, 1, &children, true ,
true ));
if (!children.empty()) {
stat->base = DIRECTORY_STAT;
return absl::OkStatus();
} else {
return errors::InvalidArgument("Not a directory!");
}
};
GcsFileStat stat;
absl::Status s = stat_cache_->LookupOrCompute(MaybeAppendSlash(dirname),
&stat, compute_func);
if (s.ok()) {
*result = stat.base.is_directory;
return absl::OkStatus();
}
if (absl::IsInvalidArgument(s)) {
*result = false;
return absl::OkStatus();
}
return s;
}
absl::Status GcsFileSystem::GetChildren(const string& dirname,
TransactionToken* token,
std::vector<string>* result) {
return GetChildrenBounded(dirname, UINT64_MAX, result,
                            /*recursive=*/false,
                            /*include_self_directory_marker=*/false);
}
absl::Status GcsFileSystem::GetMatchingPaths(const string& pattern,
TransactionToken* token,
std::vector<string>* results) {
MatchingPathsCache::ComputeFunc compute_func =
[this](const string& pattern, std::vector<string>* results) {
results->clear();
const string& fixed_prefix =
pattern.substr(0, pattern.find_first_of("*?[\\"));
const string dir(this->Dirname(fixed_prefix));
if (dir.empty()) {
return errors::InvalidArgument(
"A GCS pattern doesn't have a bucket name: ", pattern);
}
std::vector<string> all_files;
TF_RETURN_IF_ERROR(GetChildrenBounded(
dir, UINT64_MAX, &all_files, true ,
false ));
const auto& files_and_folders = AddAllSubpaths(all_files);
const absl::string_view dir_no_slash = absl::StripSuffix(dir, "/");
for (const auto& path : files_and_folders) {
const string full_path = strings::StrCat(dir_no_slash, "/", path);
if (this->Match(full_path, pattern)) {
results->push_back(full_path);
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
matching_paths_cache_->LookupOrCompute(pattern, results, compute_func));
return absl::OkStatus();
}
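// Lists objects under `dirname` via the paginated objects.list API. In
// non-recursive mode a '/' delimiter is used so subdirectories come back as
// prefixes; results are returned relative to the directory.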
absl::Status GcsFileSystem::GetChildrenBounded(
const string& dirname, uint64 max_results, std::vector<string>* result,
bool recursive, bool include_self_directory_marker) {
if (!result) {
return errors::InvalidArgument("'result' cannot be null");
}
string bucket, object_prefix;
TF_RETURN_IF_ERROR(
ParseGcsPath(MaybeAppendSlash(dirname), true, &bucket, &object_prefix));
string nextPageToken;
uint64 retrieved_results = 0;
while (true) {
std::vector<char> output_buffer;
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
auto uri = strings::StrCat(kGcsUriBase, "b/", bucket, "/o");
if (recursive) {
uri = strings::StrCat(uri, "?fields=items%2Fname%2CnextPageToken");
} else {
uri = strings::StrCat(uri,
"?fields=items%2Fname%2Cprefixes%2CnextPageToken");
uri = strings::StrCat(uri, "&delimiter=%2F");
}
if (!object_prefix.empty()) {
uri = strings::StrCat(uri,
"&prefix=", request->EscapeString(object_prefix));
}
if (!nextPageToken.empty()) {
uri = strings::StrCat(
uri, "&pageToken=", request->EscapeString(nextPageToken));
}
if (max_results - retrieved_results < kGetChildrenDefaultPageSize) {
uri =
strings::StrCat(uri, "&maxResults=", max_results - retrieved_results);
}
request->SetUri(uri);
request->SetResultBuffer(&output_buffer);
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when reading ", dirname);
Json::Value root;
TF_RETURN_IF_ERROR(ParseJson(output_buffer, &root));
const auto items = root.get("items", Json::Value::null);
if (!items.isNull()) {
if (!items.isArray()) {
return errors::Internal(
"Expected an array 'items' in the GCS response.");
}
for (size_t i = 0; i < items.size(); i++) {
const auto item = items.get(i, Json::Value::null);
if (!item.isObject()) {
return errors::Internal(
"Unexpected JSON format: 'items' should be a list of objects.");
}
string name;
TF_RETURN_IF_ERROR(GetStringValue(item, "name", &name));
absl::string_view relative_path(name);
if (!absl::ConsumePrefix(&relative_path, object_prefix)) {
return errors::Internal(strings::StrCat(
"Unexpected response: the returned file name ", name,
" doesn't match the prefix ", object_prefix));
}
if (!relative_path.empty() || include_self_directory_marker) {
result->emplace_back(relative_path);
}
if (++retrieved_results >= max_results) {
return absl::OkStatus();
}
}
}
const auto prefixes = root.get("prefixes", Json::Value::null);
if (!prefixes.isNull()) {
if (!prefixes.isArray()) {
return errors::Internal(
"'prefixes' was expected to be an array in the GCS response.");
}
for (size_t i = 0; i < prefixes.size(); i++) {
const auto prefix = prefixes.get(i, Json::Value::null);
if (prefix.isNull() || !prefix.isString()) {
return errors::Internal(
"'prefixes' was expected to be an array of strings in the GCS "
"response.");
}
const string& prefix_str = prefix.asString();
absl::string_view relative_path(prefix_str);
if (!absl::ConsumePrefix(&relative_path, object_prefix)) {
return errors::Internal(
"Unexpected response: the returned folder name ", prefix_str,
" doesn't match the prefix ", object_prefix);
}
result->emplace_back(relative_path);
if (++retrieved_results >= max_results) {
return absl::OkStatus();
}
}
}
const auto token = root.get("nextPageToken", Json::Value::null);
if (token.isNull()) {
return absl::OkStatus();
}
if (!token.isString()) {
return errors::Internal(
"Unexpected response: nextPageToken is not a string");
}
nextPageToken = token.asString();
}
}
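// Resolution order: object metadata first, then (for bucket-only paths) the
// bucket itself, and finally the implicit-folder fallback.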
absl::Status GcsFileSystem::Stat(const string& fname, TransactionToken* token,
FileStatistics* stat) {
if (!stat) {
return errors::Internal("'stat' cannot be nullptr.");
}
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, true, &bucket, &object));
if (object.empty()) {
bool is_bucket;
TF_RETURN_IF_ERROR(BucketExists(bucket, &is_bucket));
if (is_bucket) {
*stat = DIRECTORY_STAT;
return absl::OkStatus();
}
return errors::NotFound("The specified bucket ", fname, " was not found.");
}
GcsFileStat gcs_stat;
const absl::Status status = StatForObject(fname, bucket, object, &gcs_stat);
if (status.ok()) {
*stat = gcs_stat.base;
return absl::OkStatus();
}
if (!absl::IsNotFound(status)) {
return status;
}
bool is_folder;
TF_RETURN_IF_ERROR(FolderExists(fname, &is_folder));
if (is_folder) {
*stat = DIRECTORY_STAT;
return absl::OkStatus();
}
return errors::NotFound("The specified path ", fname, " was not found.");
}
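// Issues a DELETE on the object and invalidates any cached blocks and stat
// entries for the path.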
absl::Status GcsFileSystem::DeleteFile(const string& fname,
TransactionToken* token) {
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object));
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket, "/o/",
request->EscapeString(object)));
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
request->SetDeleteRequest();
TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when deleting ", fname);
ClearFileCaches(fname);
return absl::OkStatus();
}
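// A directory is a zero-byte object whose name ends in '/'. The
// "ifGenerationMatch=0" precondition makes the upload conditional on the
// object not existing yet, so a lost race comes back as HTTP 412 and is
// reported as AlreadyExists instead of silently overwriting.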
absl::Status GcsFileSystem::CreateDir(const string& dirname,
TransactionToken* token) {
string dirname_with_slash = MaybeAppendSlash(dirname);
VLOG(3) << "CreateDir: creating directory with dirname: " << dirname
<< " and dirname_with_slash: " << dirname_with_slash;
string bucket, object;
  TF_RETURN_IF_ERROR(
      ParseGcsPath(dirname_with_slash, true, &bucket, &object));
if (object.empty()) {
bool is_bucket;
TF_RETURN_IF_ERROR(BucketExists(bucket, &is_bucket));
return is_bucket ? absl::OkStatus()
: errors::NotFound("The specified bucket ",
dirname_with_slash, " was not found.");
}
if (FileExists(dirname_with_slash, token).ok()) {
VLOG(3) << "CreateDir: directory already exists, not uploading " << dirname;
return errors::AlreadyExists(dirname);
}
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
request->SetUri(strings::StrCat(
kGcsUploadUriBase, "b/", bucket,
"/o?uploadType=media&name=", request->EscapeString(object),
"&ifGenerationMatch=0"));
request->SetPostEmptyBody();
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
const absl::Status& status = request->Send();
if (status.ok()) {
VLOG(3) << "CreateDir: finished uploading directory " << dirname;
return absl::OkStatus();
}
if (request->GetResponseCode() != HTTP_CODE_PRECONDITION_FAILED) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(status, " when uploading ",
dirname_with_slash);
}
VLOG(3) << "Ignoring directory already exists on object "
<< dirname_with_slash;
return errors::AlreadyExists(dirname);
}
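// Lists at most two entries: an empty directory yields only its own marker
// (a single empty relative name), which is then deleted as a plain object.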
absl::Status GcsFileSystem::DeleteDir(const string& dirname,
TransactionToken* token) {
std::vector<string> children;
TF_RETURN_IF_ERROR(
      GetChildrenBounded(dirname, 2, &children, true /* recursive */,
                         true /* include_self_directory_marker */));
if (children.size() > 1 || (children.size() == 1 && !children[0].empty())) {
return errors::FailedPrecondition("Cannot delete a non-empty directory.");
}
if (children.size() == 1 && children[0].empty()) {
return DeleteFile(MaybeAppendSlash(dirname), token);
}
return absl::OkStatus();
}
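// The size comes from the (possibly cached) stat entry.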
absl::Status GcsFileSystem::GetFileSize(const string& fname,
TransactionToken* token,
uint64* file_size) {
if (!file_size) {
return errors::Internal("'file_size' cannot be nullptr.");
}
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object));
FileStatistics stat;
TF_RETURN_IF_ERROR(Stat(fname, token, &stat));
*file_size = stat.length;
return absl::OkStatus();
}
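// GCS has no atomic rename: a file is renamed via a single object
// rewrite-and-delete, while a directory tree is renamed by rewriting every
// object under its prefix.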
absl::Status GcsFileSystem::RenameFile(const string& src, const string& target,
TransactionToken* token) {
if (!IsDirectory(src, token).ok()) {
return RenameObject(src, target);
}
std::vector<string> children;
TF_RETURN_IF_ERROR(
      GetChildrenBounded(src, UINT64_MAX, &children, true /* recursive */,
                         true /* include_self_directory_marker */));
for (const string& subpath : children) {
TF_RETURN_IF_ERROR(
RenameObject(JoinGcsPath(src, subpath), JoinGcsPath(target, subpath)));
}
return absl::OkStatus();
}
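// Copies the object via the "rewriteTo" API and deletes the source only once
// the response reports "done". Large cross-bucket rewrites can require
// multiple rewrite calls, which this implementation does not issue; it
// returns Unimplemented instead.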
absl::Status GcsFileSystem::RenameObject(const string& src,
const string& target) {
VLOG(3) << "RenameObject: started gs:
string src_bucket, src_object, target_bucket, target_object;
TF_RETURN_IF_ERROR(ParseGcsPath(src, false, &src_bucket, &src_object));
TF_RETURN_IF_ERROR(
ParseGcsPath(target, false, &target_bucket, &target_object));
std::unique_ptr<HttpRequest> request;
TF_RETURN_IF_ERROR(CreateHttpRequest(&request));
request->SetUri(strings::StrCat(kGcsUriBase, "b/", src_bucket, "/o/",
request->EscapeString(src_object),
"/rewriteTo/b/", target_bucket, "/o/",
request->EscapeString(target_object)));
request->SetPostEmptyBody();
request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata);
std::vector<char> output_buffer;
request->SetResultBuffer(&output_buffer);
TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when renaming ", src,
" to ", target);
ClearFileCaches(target);
Json::Value root;
TF_RETURN_IF_ERROR(ParseJson(output_buffer, &root));
bool done;
TF_RETURN_IF_ERROR(GetBoolValue(root, "done", &done));
if (!done) {
return errors::Unimplemented(
"Couldn't rename ", src, " to ", target,
": moving large files between buckets with different "
"locations or storage classes is not supported.");
}
VLOG(3) << "RenameObject: finished from: gs:
return RetryingUtils::DeleteWithRetries(
[this, &src]() { return DeleteFile(src, nullptr); }, retry_config_);
}
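// A path is a directory if it names an existing bucket or a folder (marker
// object or non-empty prefix); a path that resolves to a regular object is
// FailedPrecondition rather than NotFound.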
absl::Status GcsFileSystem::IsDirectory(const string& fname,
TransactionToken* token) {
string bucket, object;
TF_RETURN_IF_ERROR(ParseGcsPath(fname, true, &bucket, &object));
if (object.empty()) {
bool is_bucket;
TF_RETURN_IF_ERROR(BucketExists(bucket, &is_bucket));
if (is_bucket) {
return absl::OkStatus();
}
return errors::NotFound("The specified bucket gs:
" was not found.");
}
bool is_folder;
TF_RETURN_IF_ERROR(FolderExists(fname, &is_folder));
if (is_folder) {
return absl::OkStatus();
}
bool is_object;
TF_RETURN_IF_ERROR(ObjectExists(fname, bucket, object, &is_object));
if (is_object) {
return errors::FailedPrecondition("The specified path ", fname,
" is not a directory.");
}
return errors::NotFound("The specified path ", fname, " was not found.");
}
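// Best-effort recursive delete: per-object failures are tallied into
// undeleted_files/undeleted_dirs instead of aborting the traversal.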
absl::Status GcsFileSystem::DeleteRecursively(const string& dirname,
TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) {
if (!undeleted_files || !undeleted_dirs) {
return errors::Internal(
"'undeleted_files' and 'undeleted_dirs' cannot be nullptr.");
}
*undeleted_files = 0;
*undeleted_dirs = 0;
if (!IsDirectory(dirname, token).ok()) {
*undeleted_dirs = 1;
return absl::Status(
absl::StatusCode::kNotFound,
strings::StrCat(dirname, " doesn't exist or not a directory."));
}
std::vector<string> all_objects;
TF_RETURN_IF_ERROR(GetChildrenBounded(
      dirname, UINT64_MAX, &all_objects, true /* recursive */,
      true /* include_self_directory_marker */));
for (const string& object : all_objects) {
const string& full_path = JoinGcsPath(dirname, object);
const auto& delete_file_status = RetryingUtils::DeleteWithRetries(
[this, &full_path, token]() { return DeleteFile(full_path, token); },
retry_config_);
if (!delete_file_status.ok()) {
if (IsDirectory(full_path, token).ok()) {
(*undeleted_dirs)++;
} else {
(*undeleted_files)++;
}
}
}
return absl::OkStatus();
}
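// Drops every cache layer; subsequent operations re-fetch from GCS.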
void GcsFileSystem::FlushCaches(TransactionToken* token) {
tf_shared_lock l(block_cache_lock_);
file_block_cache_->Flush();
stat_cache_->Clear();
matching_paths_cache_->Clear();
bucket_location_cache_->Clear();
}
void GcsFileSystem::SetStats(GcsStatsInterface* stats) {
CHECK(stats_ == nullptr) << "SetStats() has already been called.";
CHECK(stats != nullptr);
mutex_lock l(block_cache_lock_);
stats_ = stats;
stats_->Configure(this, &throttle_, file_block_cache_.get());
}
void GcsFileSystem::SetCacheStats(FileBlockCacheStatsInterface* cache_stats) {
tf_shared_lock l(block_cache_lock_);
if (file_block_cache_ == nullptr) {
LOG(ERROR) << "Tried to set cache stats of non-initialized file block "
"cache object. This may result in not exporting the intended "
"monitoring data";
return;
}
file_block_cache_->SetStats(cache_stats);
}
void GcsFileSystem::SetAuthProvider(
std::unique_ptr<AuthProvider> auth_provider) {
mutex_lock l(mu_);
auth_provider_ = std::move(auth_provider);
}
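// Shared setup for every outgoing request: DNS cache annotation, an auth
// bearer token, the optional user-configured extra header, stats hooks, and
// admission through the client-side throttle.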
absl::Status GcsFileSystem::CreateHttpRequest(
std::unique_ptr<HttpRequest>* request) {
std::unique_ptr<HttpRequest> new_request{http_request_factory_->Create()};
if (dns_cache_) {
dns_cache_->AnnotateRequest(new_request.get());
}
string auth_token;
{
tf_shared_lock l(mu_);
TF_RETURN_IF_ERROR(
AuthProvider::GetToken(auth_provider_.get(), &auth_token));
}
new_request->AddAuthBearerHeader(auth_token);
if (additional_header_) {
new_request->AddHeader(additional_header_->first,
additional_header_->second);
}
if (stats_ != nullptr) {
new_request->SetRequestStats(stats_->HttpStats());
}
if (!throttle_.AdmitRequest()) {
return errors::Unavailable("Request throttled");
}
*request = std::move(new_request);
return absl::OkStatus();
}
RetryingGcsFileSystem::RetryingGcsFileSystem()
: RetryingFileSystem(std::make_unique<GcsFileSystem>(),
RetryConfig(GetGcsRetryConfig())) {}
}  // namespace tsl
REGISTER_LEGACY_FILE_SYSTEM("gs", ::tsl::RetryingGcsFileSystem); | #include "tsl/platform/cloud/gcs_file_system.h"
#include <fstream>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
#ifdef PLATFORM_WINDOWS
#undef DeleteFile
#endif
namespace tsl {
namespace {
static GcsFileSystem::TimeoutConfig kTestTimeoutConfig(5, 1, 10, 20, 30);
static RetryConfig kTestRetryConfig(0 /* init_delay_time_us */);
static std::unordered_set<string>* kAllowedLocationsDefault =
new std::unordered_set<string>();
static std::unordered_set<string>* kAllowedLocationsAuto =
new std::unordered_set<string>({"auto"});
class FakeAuthProvider : public AuthProvider {
public:
absl::Status GetToken(string* token) override {
*token = "fake_token";
return absl::OkStatus();
}
};
class FakeZoneProvider : public ZoneProvider {
public:
absl::Status GetZone(string* zone) override {
*zone = "us-east1-b";
return absl::OkStatus();
}
};
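// Each FakeHttpRequest pairs the exact request the filesystem is expected to
// issue (URI, headers, timeouts) with a canned response; any mismatch fails
// the test, so the cases below effectively pin down the emitted HTTP traffic.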
TEST(GcsFileSystemTest, NewRandomAccessFile_NoBlockCache) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-5\n"
"Timeouts: 5 1 20\n",
"012345"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 6-11\n"
"Timeouts: 5 1 20\n",
"6789")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[6];
absl::string_view result;
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("012345", result);
EXPECT_TRUE(errors::IsOutOfRange(
file->Read(sizeof(scratch), sizeof(scratch), &result, scratch)));
EXPECT_EQ("6789", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered) {
std::vector<HttpRequest*> requests({
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-9\n"
"Timeouts: 5 1 20\n",
"0123456789"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 10-19\n"
"Timeouts: 5 1 20\n",
""),
});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[6];
absl::string_view result;
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("012345", result);
EXPECT_TRUE(errors::IsOutOfRange(
file->Read(sizeof(scratch), sizeof(scratch), &result, scratch)));
EXPECT_EQ("6789", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_Errors) {
std::vector<HttpRequest*> requests({
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-9\n"
"Timeouts: 5 1 20\n",
"Server Not", errors::Unavailable("important HTTP error 308"),
nullptr, {}, 308),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 6-15\n"
"Timeouts: 5 1 20\n",
"123"),
});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[6];
absl::string_view result;
EXPECT_TRUE(
errors::IsUnavailable(file->Read(0, sizeof(scratch), &result, scratch)));
EXPECT_EQ("", result);
EXPECT_TRUE(errors::IsOutOfRange(
file->Read(sizeof(scratch), sizeof(scratch), &result, scratch)));
EXPECT_EQ("123", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_ReadAtEOF) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-9\n"
"Timeouts: 5 1 20\n",
"0123456789"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 10-19\n"
"Timeouts: 5 1 20\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[10];
absl::string_view result;
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("0123456789", result);
EXPECT_TRUE(errors::IsOutOfRange(
file->Read(sizeof(scratch), sizeof(scratch), &result, scratch)));
EXPECT_EQ("", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_CachedOutOfRange) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-9\n"
"Timeouts: 5 1 20\n",
"012345678")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[5];
absl::string_view result;
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("01234", result);
TF_EXPECT_OK(file->Read(4, sizeof(scratch), &result, scratch));
EXPECT_EQ("45678", result);
EXPECT_TRUE(
errors::IsOutOfRange(file->Read(5, sizeof(scratch), &result, scratch)));
EXPECT_EQ("5678", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_CachedNotSequential) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 1-10\n"
"Timeouts: 5 1 20\n",
"12345678"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-9\n"
"Timeouts: 5 1 20\n",
"012345678")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[5];
absl::string_view result;
TF_EXPECT_OK(file->Read(1, sizeof(scratch), &result, scratch));
EXPECT_EQ("12345", result);
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("01234", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_Growing) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-9\n"
"Timeouts: 5 1 20\n",
"012345678"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 9-18\n"
"Timeouts: 5 1 20\n",
"9")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[10];
absl::string_view result;
EXPECT_TRUE(
errors::IsOutOfRange(file->Read(0, sizeof(scratch), &result, scratch)));
EXPECT_EQ("012345678", result);
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("0123456789", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_ReadBackwards) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 5-14\n"
"Timeouts: 5 1 20\n",
"56789"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-9\n"
"Timeouts: 5 1 20\n",
"0123456789")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
absl::string_view filename;
TF_EXPECT_OK(file->Name(&filename));
EXPECT_EQ(filename, "gs:
char scratch[10];
absl::string_view result;
EXPECT_TRUE(
errors::IsOutOfRange(file->Read(5, sizeof(scratch), &result, scratch)));
EXPECT_EQ("56789", result);
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("0123456789", result);
}
TEST(GcsFileSystemTest,
NewRandomAccessFile_WithLocationConstraintInSameLocation) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
R"(
{
"location":"US-EAST1"
})")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsAuto,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
}
TEST(GcsFileSystemTest, NewRandomAccessFile_WithLocationConstraintCaching) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
R"(
{
"location":"US-EAST1"
})"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
R"(
{
"location":"US-EAST1"
})"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
R"(
{
"location":"US-EAST1"
})")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsAuto,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
string bucket = "gs:
string another_bucket = "gs:
TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file));
TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file));
TF_EXPECT_OK(fs.NewRandomAccessFile(another_bucket, nullptr, &file));
TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file));
TF_EXPECT_OK(fs.NewRandomAccessFile(another_bucket, nullptr, &file));
fs.FlushCaches(nullptr);
TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file));
}
TEST(GcsFileSystemTest,
NewRandomAccessFile_WithLocationConstraintInDifferentLocation) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
R"(
{
"location":"BARFOO"
})")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsAuto,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
EXPECT_EQ(
errors::FailedPrecondition(
"Bucket 'bucket' is in 'barfoo' location, allowed locations "
"are: (us-east1)."),
fs.NewRandomAccessFile("gs:
}
TEST(GcsFileSystemTest, NewRandomAccessFile_NoBlockCache_DifferentN) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-2\n"
"Timeouts: 5 1 20\n",
"012"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 3-12\n"
"Timeouts: 5 1 20\n",
"3456789")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
char small_scratch[3];
absl::string_view result;
TF_EXPECT_OK(file->Read(0, sizeof(small_scratch), &result, small_scratch));
EXPECT_EQ("012", result);
char large_scratch[10];
EXPECT_TRUE(errors::IsOutOfRange(file->Read(
sizeof(small_scratch), sizeof(large_scratch), &result, large_scratch)));
EXPECT_EQ("3456789", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"random_access.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"15\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-8\n"
"Timeouts: 5 1 20\n",
"012345678"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 9-17\n"
"Timeouts: 5 1 20\n",
"9abcde"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 18-26\n"
"Timeouts: 5 1 20\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 9 ,
18 , 0 , 3600 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
char scratch[100];
absl::string_view result;
{
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(fs.NewRandomAccessFile("gs:
nullptr, &file));
scratch[5] = 'x';
TF_EXPECT_OK(file->Read(0, 4, &result, scratch));
EXPECT_EQ("0123", result);
EXPECT_EQ(scratch[5], 'x');
TF_EXPECT_OK(file->Read(4, 4, &result, scratch));
EXPECT_EQ("4567", result);
TF_EXPECT_OK(file->Read(6, 5, &result, scratch));
EXPECT_EQ("6789a", result);
EXPECT_TRUE(errors::IsOutOfRange(file->Read(6, 10, &result, scratch)));
EXPECT_EQ("6789abcde", result);
EXPECT_TRUE(errors::IsOutOfRange(file->Read(20, 10, &result, scratch)));
EXPECT_TRUE(result.empty());
TF_EXPECT_OK(file->Read(0, 4, &result, scratch));
}
EXPECT_EQ("0123", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache_Flush) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"random_access.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"15\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-8\n"
"Timeouts: 5 1 20\n",
"012345678"),
new FakeHttpRequest(
"Uri: https:
"random_access.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"15\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-8\n"
"Timeouts: 5 1 20\n",
"012345678")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 9 ,
18 , 0 , 3600 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
char scratch[100];
absl::string_view result;
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
scratch[5] = 'x';
TF_EXPECT_OK(file->Read(0, 4, &result, scratch));
EXPECT_EQ("0123", result);
EXPECT_EQ(scratch[5], 'x');
fs.FlushCaches(nullptr);
TF_EXPECT_OK(file->Read(4, 4, &result, scratch));
EXPECT_EQ("4567", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache_MaxStaleness) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"object?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"16\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Range: 0-7\n"
"Timeouts: 5 1 20\n",
"01234567"),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Range: 8-15\n"
"Timeouts: 5 1 20\n",
"89abcdef")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 8 ,
16 , 3600 ,
3600 , 0 ,
0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
char scratch[100];
absl::string_view result;
for (int i = 0; i < 10; i++) {
std::unique_ptr<RandomAccessFile> file1;
std::unique_ptr<RandomAccessFile> file2;
TF_EXPECT_OK(fs.NewRandomAccessFile("gs:
TF_EXPECT_OK(fs.NewRandomAccessFile("gs:
TF_EXPECT_OK(file1->Read(0, 8, &result, scratch));
EXPECT_EQ("01234567", result);
TF_EXPECT_OK(file2->Read(0, 8, &result, scratch));
EXPECT_EQ("01234567", result);
TF_EXPECT_OK(file2->Read(8, 8, &result, scratch));
EXPECT_EQ("89abcdef", result);
TF_EXPECT_OK(file1->Read(8, 8, &result, scratch));
EXPECT_EQ("89abcdef", result);
}
}
TEST(GcsFileSystemTest,
NewRandomAccessFile_WithBlockCache_FileSignatureChanges) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"random_access.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"5\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-8\n"
"Timeouts: 5 1 20\n",
"01234"),
new FakeHttpRequest(
"Uri: https:
"random_access.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"5\",\"generation\": \"2\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-8\n"
"Timeouts: 5 1 20\n",
"43210")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 9 ,
18 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
char scratch[5];
absl::string_view result;
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("01234", result);
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("43210", result);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_NoObjectName) {
std::vector<HttpRequest*> requests;
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider),
0 , 0 , 0 ,
0 , 0 ,
0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> file;
EXPECT_TRUE(errors::IsInvalidArgument(
fs.NewRandomAccessFile("gs:
}
TEST(GcsFileSystemTest, NewRandomAccessFile_InconsistentRead) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"random_access.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"6\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-5\n"
"Timeouts: 5 1 20\n",
"012")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 1e3 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
FileStatistics stat;
TF_ASSERT_OK(fs.Stat("gs:
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(
fs.NewRandomAccessFile("gs:
char scratch[6];
absl::string_view result;
EXPECT_TRUE(
errors::IsInternal(file->Read(0, sizeof(scratch), &result, scratch)));
}
TEST(GcsFileSystemTest, NewWritableFile) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"16\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-7\n"
"Timeouts: 5 1 20\n",
"01234567"),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fwriteable\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
""),
new FakeHttpRequest(
"Uri: https:
"path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"33\",\"generation\": \"2\","
"\"updated\": \"2016-04-29T23:15:34.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-7\n"
"Timeouts: 5 1 20\n",
"01234567")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 8 ,
8 , 0 , 3600 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> rfile;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
char scratch[100];
absl::string_view result;
TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch));
EXPECT_EQ("0123", result);
std::unique_ptr<WritableFile> wfile;
TF_EXPECT_OK(
fs.NewWritableFile("gs:
TF_EXPECT_OK(wfile->Append("content1,"));
int64_t pos;
TF_EXPECT_OK(wfile->Tell(&pos));
EXPECT_EQ(9, pos);
TF_EXPECT_OK(wfile->Append("content2"));
TF_EXPECT_OK(wfile->Flush());
TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch));
EXPECT_EQ("0123", result);
TF_EXPECT_OK(wfile->Flush());
TF_EXPECT_OK(wfile->Sync());
TF_EXPECT_OK(wfile->Close());
}
TEST(GcsFileSystemTest, NewWritableFile_ResumeUploadSucceeds) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fwriteable.txt\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
"", errors::Unavailable("503"), 503),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Header Content-Range: bytes */17\n"
"Put: yes\n",
"", errors::Unavailable("308"), nullptr,
{{"Range", "0-10"}}, 308),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 11-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: ntent2\n",
"", errors::Unavailable("503"), 503),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Header Content-Range: bytes */17\n"
"Put: yes\n",
"", errors::Unavailable("308"), nullptr,
{{"Range", "bytes=0-12"}}, 308),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 13-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: ent2\n",
"", errors::Unavailable("308"), 308),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Header Content-Range: bytes */17\n"
"Put: yes\n",
"", errors::Unavailable("308"), nullptr,
{{"Range", "bytes=0-14"}}, 308),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 15-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: t2\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<WritableFile> file;
TF_EXPECT_OK(
fs.NewWritableFile("gs:
TF_EXPECT_OK(file->Append("content1,"));
TF_EXPECT_OK(file->Append("content2"));
TF_EXPECT_OK(file->Close());
}
TEST(GcsFileSystemTest, NewWritableFile_ResumeUploadSucceedsOnGetStatus) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"16\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-7\n"
"Timeouts: 5 1 20\n",
"01234567"),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fwriteable\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
"", errors::Unavailable("503"), 503),
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Header Content-Range: bytes */17\n"
"Put: yes\n",
"", absl::OkStatus(), nullptr, {}, 201),
new FakeHttpRequest(
"Uri: https:
"path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"33\",\"generation\": \"2\","
"\"updated\": \"2016-04-29T23:19:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-7\n"
"Timeouts: 5 1 20\n",
"01234567")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 8 ,
8 , 3600 ,
3600 , 0 ,
0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<RandomAccessFile> rfile;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
char scratch[100];
absl::string_view result;
TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch));
EXPECT_EQ("0123", result);
std::unique_ptr<WritableFile> wfile;
TF_EXPECT_OK(
fs.NewWritableFile("gs:
TF_EXPECT_OK(wfile->Append("content1,"));
TF_EXPECT_OK(wfile->Append("content2"));
TF_EXPECT_OK(rfile->Read(4, 4, &result, scratch));
EXPECT_EQ("4567", result);
TF_EXPECT_OK(wfile->Close());
TF_EXPECT_OK(rfile->Read(0, 8, &result, scratch));
EXPECT_EQ("01234567", result);
}
TEST(GcsFileSystemTest, NewWritableFile_ResumeUploadAllAttemptsFail) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fwriteable.txt\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
"", errors::Unavailable("503"), 503)});
for (int i = 0; i < 10; i++) {
requests.emplace_back(
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Header Content-Range: bytes */17\n"
"Put: yes\n",
"", errors::Unavailable("important HTTP error 308"),
nullptr, {{"Range", "0-10"}}, 308));
requests.emplace_back(new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 11-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: ntent2\n",
"", errors::Unavailable("important HTTP error 503"), 503));
}
requests.emplace_back(new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fwriteable.txt\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
requests.emplace_back(
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
""));
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 ,
      RetryConfig(2 /* init_delay_time_us */), kTestTimeoutConfig,
*kAllowedLocationsDefault, nullptr ,
false );
std::unique_ptr<WritableFile> file;
TF_EXPECT_OK(
fs.NewWritableFile("gs:
TF_EXPECT_OK(file->Append("content1,"));
TF_EXPECT_OK(file->Append("content2"));
const auto& status = file->Close();
EXPECT_TRUE(errors::IsAborted(status));
EXPECT_TRUE(
absl::StrContains(status.message(),
"All 10 retry attempts failed. The last failure: "
"important HTTP error 503"))
<< status;
}
TEST(GcsFileSystemTest, NewWritableFile_UploadReturns410) {
std::vector<string> results;
TF_EXPECT_OK(
Env::Default()->GetMatchingPaths("/tmp/tmp_file_tensorflow*", &results));
const int64_t tmp_files_before = results.size();
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fwriteable.txt\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
"", errors::NotFound("important HTTP error 410"),
410),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fwriteable.txt\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
{
std::unique_ptr<WritableFile> file;
TF_EXPECT_OK(
fs.NewWritableFile("gs:
TF_EXPECT_OK(file->Append("content1,"));
TF_EXPECT_OK(file->Append("content2"));
const auto& status = file->Close();
EXPECT_TRUE(errors::IsUnavailable(status));
EXPECT_TRUE(
absl::StrContains(status.message(),
"Upload to gs:
"caused by: important HTTP error 410"))
<< status;
EXPECT_TRUE(absl::StrContains(
status.message(), "when uploading gs:
<< status;
}
results.clear();
TF_EXPECT_OK(
Env::Default()->GetMatchingPaths("/tmp/tmp_file_tensorflow*", &results));
EXPECT_EQ(tmp_files_before, results.size());
}
TEST(GcsFileSystemTest, NewWritableFile_NoObjectName) {
std::vector<HttpRequest*> requests;
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<WritableFile> file;
EXPECT_TRUE(errors::IsInvalidArgument(
fs.NewWritableFile("gs:
}
TEST(GcsFileSystemTest, NewAppendableFile) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Fappendable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-1048575\n"
"Timeouts: 5 1 20\n",
"content1,"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-31\n"
"Timeouts: 5 1 20\n",
"content1,"),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fappendable\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 17\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-16/17\n"
"Timeouts: 5 1 30\n"
"Put body: content1,content2\n",
""),
new FakeHttpRequest(
"Uri: https:
"path%2Fappendable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"2\","
"\"updated\": \"2016-04-29T23:25:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-31\n"
"Timeouts: 5 1 20\n",
"01234567")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 32 ,
32 , 0 , 3600 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<WritableFile> wfile;
TF_EXPECT_OK(
fs.NewAppendableFile("gs:
TF_EXPECT_OK(wfile->Append("content2"));
std::unique_ptr<RandomAccessFile> rfile;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
char scratch[100];
absl::string_view result;
TF_EXPECT_OK(rfile->Read(0, 8, &result, scratch));
EXPECT_EQ("content1", result);
TF_EXPECT_OK(wfile->Close());
TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch));
EXPECT_EQ("0123", result);
}
TEST(GcsFileSystemTest, NewAppendableFile_NoObjectName) {
std::vector<HttpRequest*> requests;
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<WritableFile> file;
EXPECT_TRUE(errors::IsInvalidArgument(
fs.NewAppendableFile("gs:
}
TEST(GcsFileSystemTest, NewAppendableFile_ObjectDoesNotExist) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-1048575\n"
"Timeouts: 5 1 20\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"?uploadType=resumable&name=filename\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 0\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<WritableFile> file;
TF_EXPECT_OK(fs.NewAppendableFile("gs:
}
TEST(GcsFileSystemTest, NewReadOnlyMemoryRegionFromFile) {
const string content = "file content";
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Frandom_access.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"", content.size(), "\"",
", \"generation\": \"1\"",
", \"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
strings::StrCat("Uri: https:
"path%2Frandom_access.txt\n"
"Auth Token: fake_token\n"
"Range: 0-",
content.size() - 1, "\n", "Timeouts: 5 1 20\n"),
content)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<ReadOnlyMemoryRegion> region;
TF_EXPECT_OK(fs.NewReadOnlyMemoryRegionFromFile(
"gs:
EXPECT_EQ(content,
absl::string_view(reinterpret_cast<const char*>(region->data()),
region->length()));
}
TEST(GcsFileSystemTest, NewReadOnlyMemoryRegionFromFile_NoObjectName) {
std::vector<HttpRequest*> requests;
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::unique_ptr<ReadOnlyMemoryRegion> region;
EXPECT_TRUE(errors::IsInvalidArgument(
fs.NewReadOnlyMemoryRegionFromFile("gs:
}
TEST(GcsFileSystemTest, FileExists_YesAsObject) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
TF_EXPECT_OK(fs.FileExists("gs:
}
TEST(GcsFileSystemTest, FileExists_YesAsFolder) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Fsubfolder?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubfolder%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/subfolder/\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
TF_EXPECT_OK(fs.FileExists("gs:
}
TEST(GcsFileSystemTest, FileExists_YesAsBucket) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"size\": \"100\"}"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"size\": \"100\"}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
TF_EXPECT_OK(fs.FileExists("gs:
TF_EXPECT_OK(fs.FileExists("gs:
}
TEST(GcsFileSystemTest, FileExists_NotAsObjectOrFolder) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Ffile1.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": []}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
EXPECT_TRUE(
errors::IsNotFound(fs.FileExists("gs:
}
TEST(GcsFileSystemTest, FileExists_NotAsBucket) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
EXPECT_TRUE(absl::IsNotFound(fs.FileExists("gs:
EXPECT_TRUE(absl::IsNotFound(fs.FileExists("gs:
}
TEST(GcsFileSystemTest, FileExists_StatCache) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"path%2Fsubfolder%2F?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubfolder%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/subfolder/\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 3600 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
for (int i = 0; i < 10; i++) {
TF_EXPECT_OK(fs.FileExists("gs:
TF_EXPECT_OK(fs.FileExists("gs:
}
}
TEST(GcsFileSystemTest, FileExists_DirectoryMark) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"dir%2F?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"5\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 3600 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
TF_EXPECT_OK(fs.FileExists("gs:
TF_EXPECT_OK(fs.IsDirectory("gs:
}
TEST(GcsFileSystemTest, GetChildren_NoItems) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix="
"path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"prefixes\": [\"path/subpath/\"]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> children;
TF_EXPECT_OK(fs.GetChildren("gs:
EXPECT_EQ(std::vector<string>({"subpath/"}), children);
}
TEST(GcsFileSystemTest, GetChildren_ThreeFiles) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix="
"path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file1.txt\" },"
" { \"name\": \"path/file3.txt\" }],"
"\"prefixes\": [\"path/subpath/\"]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 ,
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> children;
TF_EXPECT_OK(fs.GetChildren("gs:
EXPECT_EQ(std::vector<string>({"file1.txt", "file3.txt", "subpath/"}),
children);
}
TEST(GcsFileSystemTest, GetChildren_SelfDirectoryMarker) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix="
"path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/\" },"
" { \"name\": \"path/file3.txt\" }],"
"\"prefixes\": [\"path/subpath/\"]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::vector<string> children;
TF_EXPECT_OK(fs.GetChildren("gs:
EXPECT_EQ(std::vector<string>({"file3.txt", "subpath/"}), children);
}
TEST(GcsFileSystemTest, GetChildren_ThreeFiles_NoSlash) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix="
"path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file1.txt\" },"
" { \"name\": \"path/file3.txt\" }],"
"\"prefixes\": [\"path/subpath/\"]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::vector<string> children;
TF_EXPECT_OK(fs.GetChildren("gs:
EXPECT_EQ(std::vector<string>({"file1.txt", "file3.txt", "subpath/"}),
children);
}
TEST(GcsFileSystemTest, GetChildren_Root) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::vector<string> children;
TF_EXPECT_OK(fs.GetChildren("gs:
EXPECT_EQ(0, children.size());
}
TEST(GcsFileSystemTest, GetChildren_Empty) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix="
"path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::vector<string> children;
TF_EXPECT_OK(fs.GetChildren("gs:
EXPECT_EQ(0, children.size());
}
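// GetChildren must follow nextPageToken continuations: the listing below is
// split across two fake responses, and all five entries are expected back.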
TEST(GcsFileSystemTest, GetChildren_Pagination) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&"
"prefix=path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"nextPageToken\": \"ABCD==\", "
"\"items\": [ "
" { \"name\": \"path/file1.txt\" },"
" { \"name\": \"path/file3.txt\" }],"
"\"prefixes\": [\"path/subpath/\"]}"),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&"
"prefix=path%2F"
"&pageToken=ABCD==\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file4.txt\" },"
" { \"name\": \"path/file5.txt\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::vector<string> children;
TF_EXPECT_OK(fs.GetChildren("gs:
EXPECT_EQ(std::vector<string>({"file1.txt", "file3.txt", "subpath/",
"file4.txt", "file5.txt"}),
children);
}
TEST(GcsFileSystemTest, GetMatchingPaths_NoWildcard) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubpath%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/subpath/file2.txt\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
nullptr, &result));
EXPECT_EQ(std::vector<string>({"gs:
result);
}
TEST(GcsFileSystemTest, GetMatchingPaths_BucketAndWildcard) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file1.txt\" },"
" { \"name\": \"path/subpath/file2.txt\" },"
" { \"name\": \"path/file3.txt\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> result;
TF_EXPECT_OK(
fs.GetMatchingPaths("gs:
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> result;
TF_EXPECT_OK(
fs.GetMatchingPaths("gs:
0 , 0 , 0 ,
0 , 0 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
std::vector<string> result;
EXPECT_TRUE(errors::IsInvalidArgument(
fs.GetMatchingPaths("gs:/,
0 , 0 , 0 ,
0 , 3600 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
for (int i = 0; i < 10; i++) {
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
nullptr, &result));
EXPECT_EQ(std::vector<string>({"gs:
result);
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
0 , 0 , 0 ,
0 , 3600 ,
0 , kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr , false );
for (int i = 0; i < 10; i++) {
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
nullptr, &result));
EXPECT_EQ(std::vector<string>({"gs:
result);
}
fs.FlushCaches(nullptr);
for (int i = 0; i < 10; i++) {
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs:
nullptr, &result));
EXPECT_EQ(std::vector<string>({"gs:
result);
}
}
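// Deleting a file must also invalidate its block cache and stat cache
// entries; the read after DeleteFile below observes the new generation and
// contents rather than the cached bytes.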
TEST(GcsFileSystemTest, DeleteFile) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-15\n"
"Timeouts: 5 1 20\n",
"01234567"),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Ffile1.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest(
"Uri: https:
"path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"2\","
"\"updated\": \"2016-04-29T23:19:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-15\n"
"Timeouts: 5 1 20\n",
"76543210")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 16 /* block size */,
16 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
char scratch[100];
absl::string_view result;
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
TF_EXPECT_OK(file->Read(0, 8, &result, scratch));
EXPECT_EQ("01234567", result);
TF_EXPECT_OK(fs.DeleteFile("gs:
TF_EXPECT_OK(file->Read(0, 8, &result, scratch));
EXPECT_EQ("76543210", result);
}
TEST(GcsFileSystemTest, DeleteFile_NoObjectName) {
std::vector<HttpRequest*> requests;
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
EXPECT_TRUE(
errors::IsInvalidArgument(fs.DeleteFile("gs:
}
TEST(GcsFileSystemTest, DeleteFile_StatCacheRemoved) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest("Uri: https:
"/bucket/o/file.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=file.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 16 /* block size */,
16 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat_before_deletion;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(1010, stat_before_deletion.length);
TF_EXPECT_OK(fs.DeleteFile("gs:
FileStatistics stat_after_deletion;
EXPECT_EQ(
error::Code::NOT_FOUND,
fs.Stat("gs:
}
TEST(GcsFileSystemTest, DeleteDir_Empty) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F&maxResults=2\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.DeleteDir("gs:
}
TEST(GcsFileSystemTest, DeleteDir_OnlyDirMarkerLeft) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F&maxResults=2\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/\" }]}"),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.DeleteDir("gs:
}
TEST(GcsFileSystemTest, DeleteDir_BucketOnly) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"name%2CnextPageToken&maxResults=2\nAuth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.DeleteDir("gs:
}
TEST(GcsFileSystemTest, DeleteDir_NonEmpty) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F&maxResults=2\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file1.txt\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
EXPECT_EQ(error::Code::FAILED_PRECONDITION,
fs.DeleteDir("gs:
}
TEST(GcsFileSystemTest, GetFileSize) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
uint64 size;
TF_EXPECT_OK(fs.GetFileSize("gs:
EXPECT_EQ(1010, size);
}
TEST(GcsFileSystemTest, GetFileSize_NoObjectName) {
std::vector<HttpRequest*> requests;
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
uint64 size;
EXPECT_TRUE(errors::IsInvalidArgument(
fs.GetFileSize("gs:
}
TEST(GcsFileSystemTest, RenameFile_Folder) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path1%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path1/subfolder/file1.txt\" }]}"),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path1%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path1/\" },"
" { \"name\": \"path1/subfolder/file1.txt\" },"
" { \"name\": \"path1/file2.txt\" }]}"),
new FakeHttpRequest(
"Uri: https:
"path1%2F/rewriteTo/b/bucket/o/path2%2F\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"{\"done\": true}"),
new FakeHttpRequest(
"Uri: https:
"path1%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest(
"Uri: https:
"path1%2Fsubfolder%2Ffile1.txt/rewriteTo/b/bucket/o/"
"path2%2Fsubfolder%2Ffile1.txt\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"{\"done\": true}"),
new FakeHttpRequest(
"Uri: https:
"path1%2Fsubfolder%2Ffile1.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest(
"Uri: https:
"path1%2Ffile2.txt/rewriteTo/b/bucket/o/path2%2Ffile2.txt\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"{\"done\": true}"),
new FakeHttpRequest(
"Uri: https:
"path1%2Ffile2.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(
fs.RenameFile("gs:
}
TEST(GcsFileSystemTest, RenameFile_Object) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-15\n"
"Timeouts: 5 1 20\n",
"01234567"),
new FakeHttpRequest(
"Uri: https:
"path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-15\n"
"Timeouts: 5 1 20\n",
"76543210"),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"{\"done\": true}"),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"2\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-15\n"
"Timeouts: 5 1 20\n",
"89abcdef"),
new FakeHttpRequest(
"Uri: https:
"path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"2\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-15\n"
"Timeouts: 5 1 20\n",
"fedcba98")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 16 /* block size */,
64 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
char scratch[100];
absl::string_view result;
std::unique_ptr<RandomAccessFile> src;
std::unique_ptr<RandomAccessFile> dst;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
TF_EXPECT_OK(src->Read(0, 8, &result, scratch));
EXPECT_EQ("01234567", result);
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
TF_EXPECT_OK(dst->Read(0, 8, &result, scratch));
EXPECT_EQ("76543210", result);
TF_EXPECT_OK(fs.RenameFile("gs:
"gs:
TF_EXPECT_OK(src->Read(0, 8, &result, scratch));
EXPECT_EQ("89abcdef", result);
TF_EXPECT_OK(dst->Read(0, 8, &result, scratch));
EXPECT_EQ("fedcba98", result);
}
TEST(GcsFileSystemTest, RenameFile_Object_FlushTargetStatCache) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1000\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"{\"done\": true}"),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest(
"Uri: https:
"path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat_before_renaming;
TF_EXPECT_OK(
fs.Stat("gs:
EXPECT_EQ(1000, stat_before_renaming.length);
TF_EXPECT_OK(fs.RenameFile("gs:
"gs:
FileStatistics stat_after_renaming;
TF_EXPECT_OK(
fs.Stat("gs:
EXPECT_EQ(1010, stat_after_renaming.length);
}
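// A 503 on the deletion phase of a rename is retried; the follow-up 404 is
// accepted because it means the source object is already gone.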
TEST(GcsFileSystemTest, RenameFile_Object_DeletionRetried) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"{\"done\": true}"),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"", errors::Unavailable("503"), 503),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"", errors::NotFound("404"), 404)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.RenameFile("gs:
"gs:
}
TEST(GcsFileSystemTest, RenameFile_Object_Incomplete) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"{\"done\": false}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
EXPECT_TRUE(errors::IsUnimplemented(fs.RenameFile(
"gs:
}
TEST(GcsFileSystemTest, Stat_Object) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(1010, stat.length);
EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1);
EXPECT_FALSE(stat.is_directory);
}
TEST(GcsFileSystemTest, Stat_Folder) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"subfolder?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"subfolder/\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(0, stat.length);
EXPECT_EQ(0, stat.mtime_nsec);
EXPECT_TRUE(stat.is_directory);
}
TEST(GcsFileSystemTest, Stat_ObjectOrFolderNotFound) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"path?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat;
EXPECT_EQ(error::Code::NOT_FOUND,
fs.Stat("gs:
}
TEST(GcsFileSystemTest, Stat_Bucket) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(0, stat.length);
EXPECT_EQ(0, stat.mtime_nsec);
EXPECT_TRUE(stat.is_directory);
}
TEST(GcsFileSystemTest, Stat_BucketNotFound) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat;
EXPECT_EQ(error::Code::NOT_FOUND,
fs.Stat("gs:
}
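// With a nonzero stat cache max age, the ten Stat calls for the file and the
// folder below are all served by the three fake requests.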
TEST(GcsFileSystemTest, Stat_Cache) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"subfolder%2F?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"subfolder/\" }]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
for (int i = 0; i < 10; i++) {
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(1010, stat.length);
EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1);
EXPECT_FALSE(stat.is_directory);
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(0, stat.length);
EXPECT_EQ(0, stat.mtime_nsec);
EXPECT_TRUE(stat.is_directory);
}
}
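// FlushCaches must empty the stat cache, so the second batch of Stat calls
// re-fetches metadata; hence the two identical fake requests.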
TEST(GcsFileSystemTest, Stat_Cache_Flush) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
for (int i = 0; i < 10; i++) {
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(1010, stat.length);
EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1);
EXPECT_FALSE(stat.is_directory);
}
fs.FlushCaches(nullptr);
for (int i = 0; i < 10; i++) {
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(1010, stat.length);
EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1);
EXPECT_FALSE(stat.is_directory);
}
}
TEST(GcsFileSystemTest, Stat_FilenameEndingWithSlash) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"dir%2F?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"5\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(5, stat.length);
EXPECT_TRUE(stat.is_directory);
}
TEST(GcsFileSystemTest, IsDirectory_NotFound) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=file.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
EXPECT_EQ(error::Code::NOT_FOUND,
fs.IsDirectory("gs:
}
TEST(GcsFileSystemTest, IsDirectory_NotDirectoryButObject) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=file.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
EXPECT_EQ(error::Code::FAILED_PRECONDITION,
fs.IsDirectory("gs:
}
TEST(GcsFileSystemTest, IsDirectory_Yes) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [{\"name\": \"subfolder/\"}]}"),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [{\"name\": \"subfolder/\"}]}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.IsDirectory("gs:
TF_EXPECT_OK(fs.IsDirectory("gs:
}
TEST(GcsFileSystemTest, IsDirectory_Bucket) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.IsDirectory("gs:
TF_EXPECT_OK(fs.IsDirectory("gs:
}
TEST(GcsFileSystemTest, IsDirectory_BucketNotFound) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
EXPECT_EQ(error::Code::NOT_FOUND,
fs.IsDirectory("gs:
}
TEST(GcsFileSystemTest, CreateDir_Folder) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"subpath%2F?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"uploadType=media&name=subpath%2F&ifGenerationMatch=0\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
""),
new FakeHttpRequest(
"Uri: https:
"subpath%2F?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"subpath%2F?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"uploadType=media&name=subpath%2F&ifGenerationMatch=0\n"
"Auth Token: fake_token\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", errors::FailedPrecondition("412"), 412),
});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.CreateDir("gs:
EXPECT_EQ(errors::AlreadyExists("gs:
fs.CreateDir("gs:
EXPECT_EQ(errors::AlreadyExists("gs:
fs.CreateDir("gs:
}
TEST(GcsFileSystemTest, CreateDir_Bucket) {
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
""),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TF_EXPECT_OK(fs.CreateDir("gs:
TF_EXPECT_OK(fs.CreateDir("gs:
}
TEST(GcsFileSystemTest, DeleteRecursively_Ok) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file1.txt\" }]}"),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/\" },"
" { \"name\": \"path/file1.txt\" },"
" { \"name\": \"path/subpath/file2.txt\" },"
" { \"name\": \"path/file3.txt\" }]}"),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Ffile1.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"", errors::Unavailable("500"), 500),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Ffile1.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Fsubpath%2Ffile2.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Ffile3.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
int64_t undeleted_files, undeleted_dirs;
TF_EXPECT_OK(fs.DeleteRecursively("gs:
&undeleted_files, &undeleted_dirs));
EXPECT_EQ(0, undeleted_files);
EXPECT_EQ(0, undeleted_dirs);
}
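// Objects that vanish mid-deletion (404) are tallied in undeleted_files and
// undeleted_dirs instead of failing the whole recursive delete.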
TEST(GcsFileSystemTest, DeleteRecursively_DeletionErrors) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file1.txt\" }]}"),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{\"items\": [ "
" { \"name\": \"path/file1.txt\" },"
" { \"name\": \"path/subpath/\" },"
" { \"name\": \"path/subpath/file2.txt\" },"
" { \"name\": \"path/file3.txt\" }]}"),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Ffile1.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Fsubpath%2F\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubpath%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"items\": [ "
" { \"name\": \"path/subpath/\" }]}")),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Fsubpath%2Ffile2.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest("Uri: https:
"/bucket/o/path%2Ffile3.txt\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
"", errors::NotFound("404"), 404),
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2Ffile3.txt%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"path%2Ffile3.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
int64_t undeleted_files, undeleted_dirs;
TF_EXPECT_OK(fs.DeleteRecursively("gs:
&undeleted_files, &undeleted_dirs));
EXPECT_EQ(1, undeleted_files);
EXPECT_EQ(1, undeleted_dirs);
}
TEST(GcsFileSystemTest, DeleteRecursively_NotAFolder) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest(
"Uri: https:
"fields=items%2Fname%2CnextPageToken&prefix=path%2F"
"&maxResults=1\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"{}"),
new FakeHttpRequest(
"Uri: https:
"path?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
"", errors::NotFound("404"), 404)});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
int64_t undeleted_files, undeleted_dirs;
EXPECT_EQ(error::Code::NOT_FOUND,
fs.DeleteRecursively("gs:
&undeleted_dirs)
.code());
EXPECT_EQ(0, undeleted_files);
EXPECT_EQ(1, undeleted_dirs);
}
TEST(GcsFileSystemTest, NoConstraintsEnvironmentVariableTest) {
unsetenv("GCS_ALLOWED_BUCKET_LOCATIONS");
GcsFileSystem fs1;
EXPECT_EQ(*kAllowedLocationsDefault, fs1.allowed_locations());
fs1.FlushCaches(nullptr);
}
TEST(GcsFileSystemTest, BucketLocationConstraintEnvironmentVariableTest) {
unsetenv("GCS_ALLOWED_BUCKET_LOCATIONS");
setenv("GCS_ALLOWED_BUCKET_LOCATIONS", "auto", 1);
GcsFileSystem fs1;
EXPECT_EQ(*kAllowedLocationsAuto, fs1.allowed_locations());
setenv("GCS_ALLOWED_BUCKET_LOCATIONS", "CUSTOM,list", 1);
GcsFileSystem fs2;
EXPECT_EQ(std::unordered_set<string>({"custom", "list"}),
fs2.allowed_locations());
}
TEST(GcsFileSystemTest, AdditionalRequestHeaderTest) {
GcsFileSystem fs1;
EXPECT_EQ("", fs1.additional_header_name());
EXPECT_EQ("", fs1.additional_header_value());
setenv("GCS_ADDITIONAL_REQUEST_HEADER",
"X-Add-Header:My Additional Header Value", 1);
GcsFileSystem fs2;
EXPECT_EQ("X-Add-Header", fs2.additional_header_name());
EXPECT_EQ("My Additional Header Value", fs2.additional_header_value());
setenv("GCS_ADDITIONAL_REQUEST_HEADER", "Someinvalidheadervalue", 1);
GcsFileSystem fs3;
EXPECT_EQ("", fs3.additional_header_name());
EXPECT_EQ("", fs3.additional_header_value());
setenv("GCS_ADDITIONAL_REQUEST_HEADER", ":thisisinvalid", 1);
GcsFileSystem fs4;
EXPECT_EQ("", fs4.additional_header_name());
EXPECT_EQ("", fs4.additional_header_value());
setenv("GCS_ADDITIONAL_REQUEST_HEADER", "soisthis:", 1);
GcsFileSystem fs5;
EXPECT_EQ("", fs5.additional_header_name());
EXPECT_EQ("", fs5.additional_header_value());
setenv("GCS_ADDITIONAL_REQUEST_HEADER", "a:b", 1);
GcsFileSystem fs6;
EXPECT_EQ("a", fs6.additional_header_name());
EXPECT_EQ("b", fs6.additional_header_value());
auto* add_header = new std::pair<const string, const string>(
"mynewheader", "newheadercontents");
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header mynewheader: newheadercontents\n"
"Header Hello: world\n",
"{}")});
GcsFileSystem fs7(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
add_header /* gcs additional header */, false /* compose append */);
std::unique_ptr<HttpRequest> request;
TF_EXPECT_OK(fs7.CreateHttpRequest(&request));
request->SetUri("https:
request->AddHeader("Hello", "world");
TF_EXPECT_OK(request->Send());
}
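// Cache and timeout parameters are overridable through environment
// variables; each file system instance below picks up the values set
// immediately before its construction.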
TEST(GcsFileSystemTest, OverrideCacheParameters) {
setenv("GCS_READ_CACHE_BLOCK_SIZE_MB", "16", 1);
setenv("GCS_READ_CACHE_MAX_SIZE_MB", "128", 1);
GcsFileSystem fs1;
EXPECT_EQ(16 * 1024 * 1024, fs1.block_size());
EXPECT_EQ(128 * 1024 * 1024, fs1.max_bytes());
EXPECT_EQ(0, fs1.max_staleness());
EXPECT_EQ(120, fs1.timeouts().connect);
EXPECT_EQ(60, fs1.timeouts().idle);
EXPECT_EQ(3600, fs1.timeouts().metadata);
EXPECT_EQ(3600, fs1.timeouts().read);
EXPECT_EQ(3600, fs1.timeouts().write);
unsetenv("GCS_READ_CACHE_BLOCK_SIZE_MB");
setenv("GCS_READAHEAD_BUFFER_SIZE_BYTES", "123456789", 1);
GcsFileSystem fs2;
EXPECT_EQ(123456789L, fs2.block_size());
setenv("GCS_READ_CACHE_BLOCK_SIZE_MB", "1", 1);
setenv("GCS_READ_CACHE_MAX_SIZE_MB", "16", 1);
setenv("GCS_READ_CACHE_MAX_STALENESS", "60", 1);
GcsFileSystem fs3;
EXPECT_EQ(1048576L, fs3.block_size());
EXPECT_EQ(16 * 1024 * 1024, fs3.max_bytes());
EXPECT_EQ(60, fs3.max_staleness());
setenv("GCS_STAT_CACHE_MAX_AGE", "60", 1);
setenv("GCS_STAT_CACHE_MAX_ENTRIES", "32", 1);
setenv("GCS_MATCHING_PATHS_CACHE_MAX_AGE", "30", 1);
setenv("GCS_MATCHING_PATHS_CACHE_MAX_ENTRIES", "64", 1);
GcsFileSystem fs4;
EXPECT_EQ(60, fs4.stat_cache_max_age());
EXPECT_EQ(32, fs4.stat_cache_max_entries());
EXPECT_EQ(30, fs4.matching_paths_cache_max_age());
EXPECT_EQ(64, fs4.matching_paths_cache_max_entries());
setenv("GCS_REQUEST_CONNECTION_TIMEOUT_SECS", "10", 1);
setenv("GCS_REQUEST_IDLE_TIMEOUT_SECS", "5", 1);
setenv("GCS_METADATA_REQUEST_TIMEOUT_SECS", "20", 1);
setenv("GCS_READ_REQUEST_TIMEOUT_SECS", "30", 1);
setenv("GCS_WRITE_REQUEST_TIMEOUT_SECS", "40", 1);
GcsFileSystem fs5;
EXPECT_EQ(10, fs5.timeouts().connect);
EXPECT_EQ(5, fs5.timeouts().idle);
EXPECT_EQ(20, fs5.timeouts().metadata);
EXPECT_EQ(30, fs5.timeouts().read);
EXPECT_EQ(40, fs5.timeouts().write);
}
TEST(GcsFileSystemTest, CreateHttpRequest) {
std::vector<HttpRequest*> requests(
{
new FakeHttpRequest("Uri: https:
"Auth Token: fake_token\n"
"Header Hello: world\n",
"{}")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::unique_ptr<HttpRequest> request;
TF_EXPECT_OK(fs.CreateHttpRequest(&request));
request->SetUri("https:
request->AddHeader("Hello", "world");
TF_EXPECT_OK(request->Send());
}
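// Fake stats recorder used to verify that GcsFileSystem reports stat and
// block-read activity through the GcsStatsInterface hooks.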
class TestGcsStats : public GcsStatsInterface {
public:
void Configure(GcsFileSystem* fs, GcsThrottle* throttle,
const FileBlockCache* block_cache) override {
CHECK(fs_ == nullptr);
CHECK(throttle_ == nullptr);
CHECK(block_cache_ == nullptr);
fs_ = fs;
throttle_ = throttle;
block_cache_ = block_cache;
}
void RecordBlockLoadRequest(const string& file, size_t offset) override {
block_load_request_file_ = file;
}
void RecordBlockRetrieved(const string& file, size_t offset,
size_t bytes_transferred) override {
block_retrieved_file_ = file;
block_retrieved_bytes_transferred_ = bytes_transferred;
}
void RecordStatObjectRequest() override { stat_object_request_count_++; }
HttpRequest::RequestStats* HttpStats() override { return nullptr; }
GcsFileSystem* fs_ = nullptr;
GcsThrottle* throttle_ = nullptr;
const FileBlockCache* block_cache_ = nullptr;
string block_load_request_file_;
string block_retrieved_file_;
size_t block_retrieved_bytes_transferred_ = 0;
int stat_object_request_count_ = 0;
};
TEST(GcsFileSystemTest, Stat_StatsRecording) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"file.txt?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}"))});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TestGcsStats stats;
fs.SetStats(&stats);
EXPECT_EQ(stats.fs_, &fs);
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("gs:
EXPECT_EQ(1, stats.stat_object_request_count_);
}
TEST(GcsFileSystemTest, NewRandomAccessFile_StatsRecording) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-5\n"
"Timeouts: 5 1 20\n",
"012345")});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 /* block size */,
0 /* max bytes */, 0 /* max staleness */, 0 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
TestGcsStats stats;
fs.SetStats(&stats);
EXPECT_EQ(stats.fs_, &fs);
std::unique_ptr<RandomAccessFile> file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("gs:
char scratch[6];
absl::string_view result;
TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch));
EXPECT_EQ("012345", result);
EXPECT_EQ("gs:
EXPECT_EQ("gs:
EXPECT_EQ(6, stats.block_retrieved_bytes_transferred_);
}
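// In compose-append mode, each Flush uploads only the newly appended bytes
// to a temporary ".tmpcompose" object and then merges it into the target
// with the GCS compose API, guarded by an ifGenerationMatch precondition.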
TEST(GcsFileSystemTest, NewAppendableFile_MultipleFlushesWithCompose) {
std::vector<string> contents(
{"content0,", "content1,", "content2,", "content3,"});
std::vector<HttpRequest*> requests({
new FakeHttpRequest(
"Uri: "
"https:
"some%2Fpath%2Fappendable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: "
"https:
"Auth Token: fake_token\n"
"Range: 0-1048575\n"
"Timeouts: 5 1 20\n",
contents[0]),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=some%2Fpath%2Fappendable\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 18\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest(
strings::StrCat("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-17/18\n"
"Timeouts: 5 1 30\n"
"Put body: ",
contents[0], contents[1], "\n"),
""),
new FakeHttpRequest(
"Uri: "
"https:
"o?uploadType=resumable&name=some%2Fpath%2F.tmpcompose%2Fappendable."
"18\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 9\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"",
{{"Location",
"https:
"location"}}),
new FakeHttpRequest(
strings::StrCat("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-8/9\n"
"Timeouts: 5 1 30\n"
"Put body: ",
contents[2], "\n"),
""),
new FakeHttpRequest(
"Uri: "
"https:
"some%2Fpath%2Fappendable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"1234\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest("Uri: "
"https:
"some%2Fpath%2Fappendable/compose\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Header content-type: application/json\n"
"Post body: {'sourceObjects': [{'name': "
"'some/path/"
"appendable','objectPrecondition':{'"
"ifGenerationMatch':1234}},{'name': "
"'some/path/.tmpcompose/appendable.18'}]}\n",
""),
new FakeHttpRequest("Uri: "
"https:
"some%2Fpath%2F.tmpcompose%2Fappendable.18\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=some%2Fpath%2F.tmpcompose%2Fappendable."
"27\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 9\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest(
strings::StrCat("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-8/9\n"
"Timeouts: 5 1 30\n"
"Put body: ",
contents[3], "\n"),
""),
new FakeHttpRequest(
"Uri: "
"https:
"some%2Fpath%2Fappendable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"4567\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest("Uri: "
"https:
"some%2Fpath%2Fappendable/compose\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Header content-type: application/json\n"
"Post body: {'sourceObjects': [{'name': "
"'some/path/"
"appendable','objectPrecondition':{'"
"ifGenerationMatch':4567}},{'name': "
"'some/path/.tmpcompose/appendable.27'}]}\n",
""),
new FakeHttpRequest("Uri: "
"https:
"some%2Fpath%2F.tmpcompose%2Fappendable."
"27\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n"
"Delete: yes\n",
""),
});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 32 /* block size */,
32 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, true /* compose append */);
std::unique_ptr<WritableFile> wfile;
TF_EXPECT_OK(fs.NewAppendableFile("gs:
&wfile));
TF_EXPECT_OK(wfile->Append(contents[1]));
TF_EXPECT_OK(wfile->Flush());
TF_EXPECT_OK(wfile->Append(contents[2]));
TF_EXPECT_OK(wfile->Flush());
TF_EXPECT_OK(wfile->Append(contents[3]));
TF_EXPECT_OK(wfile->Close());
}
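// Without compose-append, every Flush re-uploads the full accumulated
// contents from offset 0, as the growing Content-Range headers below show.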
TEST(GcsFileSystemTest, NewAppendableFile_MultipleFlushesWithoutCompose) {
std::vector<string> contents(
{"content0,", "content1,", "content2,", "content3,"});
std::vector<HttpRequest*> requests({
new FakeHttpRequest(
"Uri: https:
"path%2Fappendable?fields=size%2Cgeneration%2Cupdated\n"
"Auth Token: fake_token\n"
"Timeouts: 5 1 10\n",
strings::StrCat("{\"size\": \"8\",\"generation\": \"1\","
"\"updated\": \"2016-04-29T23:15:24.896Z\"}")),
new FakeHttpRequest(
"Uri: https:
"Auth Token: fake_token\n"
"Range: 0-1048575\n"
"Timeouts: 5 1 20\n",
contents[0]),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fappendable\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 18\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest(
strings::StrCat("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-17/18\n"
"Timeouts: 5 1 30\n"
"Put body: ",
contents[0], contents[1], "\n"),
""),
new FakeHttpRequest("Uri: "
"https:
"bucket/o?"
"uploadType=resumable&name=path%2Fappendable\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 27\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"",
{{"Location",
"https:
"location"}}),
new FakeHttpRequest(
strings::StrCat("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-26/27\n"
"Timeouts: 5 1 30\n"
"Put body: ",
contents[0], contents[1], contents[2], "\n"),
""),
new FakeHttpRequest(
"Uri: https:
"uploadType=resumable&name=path%2Fappendable\n"
"Auth Token: fake_token\n"
"Header X-Upload-Content-Length: 36\n"
"Post: yes\n"
"Timeouts: 5 1 10\n",
"", {{"Location", "https:
new FakeHttpRequest(
strings::StrCat("Uri: https:
"Auth Token: fake_token\n"
"Header Content-Range: bytes 0-35/36\n"
"Timeouts: 5 1 30\n"
"Put body: ",
contents[0], contents[1], contents[2], contents[3],
"\n"),
""),
});
GcsFileSystem fs(
std::unique_ptr<AuthProvider>(new FakeAuthProvider),
std::unique_ptr<HttpRequest::Factory>(
new FakeHttpRequestFactory(&requests)),
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 32 /* block size */,
32 /* max bytes */, 0 /* max staleness */, 3600 /* stat cache max age */,
0 /* stat cache max entries */, 0 /* matching paths cache max age */,
0 /* matching paths cache max entries */, kTestRetryConfig,
kTestTimeoutConfig, *kAllowedLocationsDefault,
nullptr /* gcs additional header */, false /* compose append */);
std::unique_ptr<WritableFile> wfile;
TF_EXPECT_OK(
fs.NewAppendableFile("gs:
TF_EXPECT_OK(wfile->Append(contents[1]));
TF_EXPECT_OK(wfile->Flush());
TF_EXPECT_OK(wfile->Append(contents[2]));
TF_EXPECT_OK(wfile->Flush());
TF_EXPECT_OK(wfile->Append(contents[3]));
TF_EXPECT_OK(wfile->Close());
}
TEST(GcsFileSystemTest, AppendModeCompose) {
unsetenv("GCS_APPEND_MODE");
setenv("GCS_APPEND_MODE", "compose", 1);
GcsFileSystem fs1;
EXPECT_TRUE(fs1.compose_append());
}
TEST(GcsFileSystemTest, AppendModeDefault) {
unsetenv("GCS_APPEND_MODE");
GcsFileSystem fs1;
EXPECT_FALSE(fs1.compose_append());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b8d8fa37-7a95-4e06-b3a1-4b2054b61d5c | cpp | google/tensorstore | box_difference | tensorstore/internal/box_difference.cc | tensorstore/internal/box_difference_test.cc | #include "tensorstore/internal/box_difference.h"
#include <cassert>
#include <limits>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/integer_overflow.h"
namespace tensorstore {
namespace internal {
namespace {
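// Returns the number of sub-boxes needed to represent `outer - inner`.
// Along each dimension, the outer interval contributes up to three parts:
// the part before `inner`, the intersection, and the part after `inner`.
// The product of the per-dimension part counts enumerates all combinations;
// subtracting one excludes the all-intersection combination, which is the
// region being removed.  If the boxes do not intersect, the difference is
// simply `outer`, represented by a single sub-box.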
Index GetNumSubtractionSubBoxes(BoxView<> outer, BoxView<> inner) {
assert(outer.rank() == inner.rank());
const DimensionIndex rank = outer.rank();
Index total_count = 1;
for (DimensionIndex i = 0; i < rank; ++i) {
IndexInterval outer_interval = outer[i];
IndexInterval inner_interval = inner[i];
Index num_parts = 1;
if (Intersect(outer_interval, inner_interval).empty()) {
return 1;
}
if (outer_interval.inclusive_min() < inner_interval.inclusive_min()) {
++num_parts;
}
if (outer_interval.inclusive_max() > inner_interval.inclusive_max()) {
++num_parts;
}
total_count *= num_parts;
}
return total_count - 1;
}
}
BoxDifference::BoxDifference(BoxView<> outer, BoxView<> inner)
: outer_(outer),
inner_(inner),
num_sub_boxes_(GetNumSubtractionSubBoxes(outer, inner)) {}
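// Decodes `sub_box_index` as a mixed-radix number, one digit per dimension:
// digit 0 selects the intersection interval, and the remaining digits select
// the interval before or after `inner` (when present). The initial increment
// skips index 0, the all-intersection cell, which belongs to `inner` rather
// than to the difference.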
void BoxDifference::GetSubBox(Index sub_box_index, MutableBoxView<> out) const {
const DimensionIndex rank = out.rank();
assert(rank == outer_.rank());
assert(sub_box_index >= 0 && sub_box_index < num_sub_boxes_);
++sub_box_index;
for (DimensionIndex i = 0; i < rank; ++i) {
IndexInterval outer_interval = outer_[i];
IndexInterval inner_interval = inner_[i];
Index num_parts = 1;
IndexInterval intersection = Intersect(outer_interval, inner_interval);
if (intersection.empty()) {
out.DeepAssign(outer_);
return;
}
const bool has_before =
outer_interval.inclusive_min() < inner_interval.inclusive_min();
const bool has_after =
outer_interval.inclusive_max() > inner_interval.inclusive_max();
if (has_before) ++num_parts;
if (has_after) ++num_parts;
const Index part_i = sub_box_index % num_parts;
switch (part_i) {
case 0:
out[i] = intersection;
break;
case 1:
if (has_before) {
out[i] = IndexInterval::UncheckedHalfOpen(
outer_interval.inclusive_min(), inner_interval.inclusive_min());
break;
}
[[fallthrough]];
case 2:
out[i] = IndexInterval::UncheckedHalfOpen(
inner_interval.exclusive_max(), outer_interval.exclusive_max());
break;
}
sub_box_index /= num_parts;
}
}
}
} | #include "tensorstore/internal/box_difference.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::internal::BoxDifference;
std::vector<Box<>> Subtract(BoxView<> outer, BoxView<> inner) {
BoxDifference difference(outer, inner);
Index count = difference.num_sub_boxes();
std::vector<Box<>> boxes(count);
for (Index i = 0; i < count; ++i) {
auto& out = boxes[i];
out.set_rank(outer.rank());
difference.GetSubBox(i, out);
}
return boxes;
}
TEST(BoxDifferenceTest, RankZero) {
EXPECT_THAT(Subtract(BoxView<>(), BoxView<>()),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankOneEmptyResult) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({1}, {5})),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankOneFullResult) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({6}, {5})),
::testing::UnorderedElementsAre(BoxView({1}, {5})));
}
TEST(BoxDifferenceTest, RankOneBeforeOnly) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({3}, {4})),
::testing::UnorderedElementsAre(BoxView({1}, {2})));
}
TEST(BoxDifferenceTest, RankOneAfterOnly) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({0}, {3})),
::testing::UnorderedElementsAre(BoxView({3}, {3})));
}
TEST(BoxDifferenceTest, RankOneBeforeAndAfter) {
EXPECT_THAT(
Subtract(BoxView({1}, {5}), BoxView({2}, {2})),
::testing::UnorderedElementsAre(BoxView({1}, {1}), BoxView({4}, {2})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({1, 2}, {5, 7})),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankTwoDim0FullDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({6, 2}, {5, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {5, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Full) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({1, 10}, {5, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {5, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {3, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {4, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {1, 7}),
BoxView({5, 2}, {1, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Before) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, 4}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 1}, {7, 3})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1After) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, -1}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {7, 4})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1BeforeAfter) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, 2}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 1}, {7, 1}),
BoxView({2, 5}, {7, 1})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 4}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 4}, {3, 5}),
BoxView({4, 2}, {2, 2}),
BoxView({1, 2}, {3, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 4}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({2, 4}, {4, 5}),
BoxView({1, 2}, {1, 2}),
BoxView({2, 2}, {4, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 4}, {3, 7})),
::testing::UnorderedElementsAre(
BoxView({1, 4}, {1, 5}), BoxView({5, 4}, {1, 5}),
BoxView({2, 2}, {3, 2}), BoxView({1, 2}, {1, 2}),
BoxView({5, 2}, {1, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 2}, {3, 1})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {3, 1}),
BoxView({4, 3}, {2, 6}),
BoxView({1, 3}, {3, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 2}, {3, 1})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {4, 1}),
BoxView({1, 3}, {1, 6}),
BoxView({2, 3}, {4, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 2}, {3, 1})),
::testing::UnorderedElementsAre(
BoxView({1, 2}, {1, 1}), BoxView({5, 2}, {1, 1}),
BoxView({2, 3}, {3, 6}), BoxView({1, 3}, {1, 6}),
BoxView({5, 3}, {1, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1BeforeAfter) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 3}, {3, 1})),
::testing::UnorderedElementsAre(
BoxView({1, 3}, {1, 1}), BoxView({5, 3}, {1, 1}),
BoxView({2, 2}, {3, 1}), BoxView({1, 2}, {1, 1}),
BoxView({5, 2}, {1, 1}), BoxView({2, 4}, {3, 5}),
BoxView({1, 4}, {1, 5}), BoxView({5, 4}, {1, 5})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/box_difference.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/box_difference_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f02a4423-d7f6-48a3-99fa-ecbd4b421227 | cpp | tensorflow/tensorflow | cwise_ops | tensorflow/compiler/tf2xla/kernels/cwise_ops.cc | tensorflow/core/kernels/cwise_ops_test.cc | #include "tensorflow/compiler/tf2xla/kernels/cwise_ops.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
void XlaBinaryOp::Compile(XlaOpKernelContext* ctx) {
TensorShape lhs_shape = ctx->InputShape(0);
TensorShape rhs_shape = ctx->InputShape(1);
xla::Shape lhs_xla_shape = ctx->InputXlaShape(0).value();
xla::Shape rhs_xla_shape = ctx->InputXlaShape(1).value();
auto lhs_handle = ctx->Input(0);
auto rhs_handle = ctx->Input(1);
if (lhs_shape.dims() == rhs_shape.dims()) {
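    // Both operands have the same rank, but their (possibly dynamic)
    // dimensions may still disagree. Reconcile them before broadcasting:
    // slice a dynamic dimension that is too large, pad one that is too
    // small, and broadcast a static size-1 dimension to the other side's
    // extent, using SetDimensionSize to carry the dynamic size through.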
auto reconcile_tensor_mismatched_dims = [ctx](
xla::XlaOp lhs, xla::XlaOp rhs,
const xla::Shape& lhs_xla_shape,
const xla::Shape& rhs_xla_shape,
TensorShape* lhs_tensor_shape) {
for (int64_t i = 0; i < lhs_xla_shape.rank(); ++i) {
if (lhs_xla_shape.is_dynamic_dimension(i)) {
if (!rhs_xla_shape.is_dynamic_dimension(i) &&
lhs_xla_shape.dimensions(i) > rhs_xla_shape.dimensions(i) &&
rhs_xla_shape.dimensions(i) != 1) {
auto size = xla::GetDimensionSize(lhs, i);
            lhs = xla::SliceInDim(lhs, /*start_index=*/0,
                                  /*limit_index=*/rhs_xla_shape.dimensions(i),
                                  /*stride=*/1, /*dimno=*/i);
lhs_tensor_shape->set_dim(i, rhs_xla_shape.dimensions(i));
lhs = xla::SetDimensionSize(lhs, size, i);
}
if (rhs_xla_shape.is_dynamic_dimension(i) &&
lhs_xla_shape.dimensions(i) < rhs_xla_shape.dimensions(i) &&
rhs_xla_shape.dimensions(i) != 1 &&
lhs_xla_shape.dimensions(i) != 1) {
auto size = xla::GetDimensionSize(lhs, i);
int64_t diff =
rhs_xla_shape.dimensions(i) - lhs_xla_shape.dimensions(i);
lhs = xla::PadInDim(
lhs, xla::Zero(ctx->builder(), lhs_xla_shape.element_type()), i,
0, diff);
lhs_tensor_shape->set_dim(i, rhs_xla_shape.dimensions(i));
lhs = xla::SetDimensionSize(lhs, size, i);
}
if (lhs_xla_shape.dimensions(i) == 1 &&
rhs_xla_shape.dimensions(i) != 1) {
auto size = xla::GetDimensionSize(lhs, i);
lhs = xla::RemoveDynamicDimension(lhs, i);
std::vector<int64_t> dimensions(lhs_xla_shape.dimensions().begin(),
lhs_xla_shape.dimensions().end());
dimensions[i] = rhs_xla_shape.dimensions(i);
std::vector<int64_t> broadcast_dimensions(lhs_xla_shape.rank());
absl::c_iota(broadcast_dimensions, 0);
lhs = xla::BroadcastInDim(lhs, dimensions, broadcast_dimensions);
xla::XlaOp rhs_size;
if (rhs_xla_shape.is_dynamic_dimension(i)) {
rhs_size = xla::GetDimensionSize(rhs, i);
} else {
rhs_size = xla::ConstantR0<int32_t>(lhs.builder(),
rhs_xla_shape.dimensions(i));
}
size = xla::Mul(size, rhs_size);
lhs = xla::SetDimensionSize(lhs, size, i);
lhs_tensor_shape->set_dim(i, rhs_xla_shape.dimensions(i));
}
}
}
return lhs;
};
lhs_handle = reconcile_tensor_mismatched_dims(
lhs_handle, rhs_handle, lhs_xla_shape, rhs_xla_shape, &lhs_shape);
rhs_handle = reconcile_tensor_mismatched_dims(
rhs_handle, lhs_handle, rhs_xla_shape, lhs_xla_shape, &rhs_shape);
}
  BCast bcast(BCast::FromShape(lhs_shape), BCast::FromShape(rhs_shape),
              /*fewer_dims_optimization=*/false);
if (!bcast.IsValid()) {
ctx->SetStatus(absl::InvalidArgumentError(
absl::StrCat("Incompatible shapes: ", lhs_shape.DebugString(), " vs. ",
rhs_shape.DebugString())));
return;
}
std::vector<int64_t> extend_dimension;
int max_rank = std::max(lhs_shape.dims(), rhs_shape.dims());
int min_rank = std::min(lhs_shape.dims(), rhs_shape.dims());
if (min_rank != max_rank) {
for (int i = 0; i < min_rank; ++i) {
extend_dimension.push_back(max_rank - min_rank + i);
}
}
xla::XlaOp output =
Computation(ctx, lhs_handle, lhs_shape.dim_sizes(), rhs_handle,
rhs_shape.dim_sizes(), bcast, extend_dimension);
ctx->SetOutput(0, output);
}
std::pair<xla::XlaOp, xla::XlaOp> XlaBinaryOp::Broadcast(
xla::XlaOp lhs, xla::XlaOp rhs, const BCast& broadcast_helper) {
auto lhs_output = BroadcastTo(lhs, broadcast_helper.output_shape());
if (!lhs_output.ok()) {
xla::XlaOp error = lhs.builder()->ReportError(lhs_output.status());
return {error, error};
}
auto rhs_output = BroadcastTo(rhs, broadcast_helper.output_shape());
if (!rhs_output.ok()) {
xla::XlaOp error = rhs.builder()->ReportError(rhs_output.status());
return {error, error};
}
return {lhs_output.value(), rhs_output.value()};
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
template <typename T>
static Graph* Unary(const string& func, int num, DataType dtype) {
Graph* g = new Graph(OpRegistry::Global());
Tensor data(dtype, TensorShape({64, 64, num / (64 * 64)}));
CHECK_GT(data.NumElements(), 0);
data.flat<T>().setRandom();
test::graph::Unary(g, func, test::graph::Constant(g, data), 0);
return g;
}
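// Benchmarks that vary both a row and a column count pack the pair into a
// single benchmark argument: arg = rows * kRows + cols, decoded by
// RowsFromArg() and ColsFromArg() below.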
const int kRows = 100000;
int RowsAndColsArg(int r, int c) { return r * kRows + c; }
int RowsFromArg(int arg) { return (arg / kRows); }
int ColsFromArg(int arg) { return (arg % kRows); }
#define BM_UNARY(DEVICE, FUNC, T, TYPE) \
void BM_##DEVICE##_##FUNC##_##TYPE(::testing::benchmark::State& state) { \
const int num = state.range(0); \
    test::Benchmark(#DEVICE, Unary<T>(#FUNC, num, TYPE),                    \
                    /*old_benchmark_api=*/false)                            \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(T)); \
} \
BENCHMARK(BM_##DEVICE##_##FUNC##_##TYPE) \
->UseRealTime() \
->Range(4 << 10, 1 << 20);
BM_UNARY(cpu, LeakyRelu, float, DT_FLOAT);
BM_UNARY(cpu, LeakyRelu, bfloat16, DT_BFLOAT16);
BM_UNARY(cpu, Floor, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Floor, float, DT_FLOAT);
#endif
BM_UNARY(cpu, Floor, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Floor, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Conj, std::complex<float>, DT_COMPLEX64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Conj, std::complex<float>, DT_COMPLEX64);
#endif
BM_UNARY(cpu, Conj, std::complex<double>, DT_COMPLEX128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Conj, std::complex<double>, DT_COMPLEX128);
#endif
BM_UNARY(cpu, Rint, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Rint, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Rint, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Rint, float, DT_FLOAT);
#endif
BM_UNARY(cpu, Round, double, DT_DOUBLE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Round, double, DT_DOUBLE);
#endif
BM_UNARY(cpu, Round, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_UNARY(gpu, Round, float, DT_FLOAT);
#endif
Graph* BinaryScalar(int num, const string& func) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, TensorShape({}));
rhs.flat<float>().setRandom();
test::graph::Binary(g, func, test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BINARY_SCALAR(DEVICE, FUNC) \
void BM_##DEVICE##_##FUNC##_scalar(::testing::benchmark::State& state) { \
const int num = state.range(0); \
\
    test::Benchmark(#DEVICE, BinaryScalar(num, #FUNC),                       \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_##FUNC##_scalar) \
->Arg(1 << 12) \
->Arg(1 << 13) \
->Arg(1 << 14) \
->Arg((1 << 15) - (1 << 13)) \
->Arg(1 << 15) \
->Arg((1 << 15) + (1 << 14)) \
->Arg(1 << 16) \
->Arg((1 << 17) - (1 << 15)) \
->Arg(1 << 17) \
->Arg((1 << 17) + (1 << 16)) \
->Arg(1 << 18) \
->Arg(1 << 19) \
->Arg(1 << 20);
BM_BINARY_SCALAR(cpu, Less);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, Less);
#endif
BM_BINARY_SCALAR(cpu, Add);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, Add);
#endif
BM_BINARY_SCALAR(cpu, DivNoNan);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BINARY_SCALAR(gpu, DivNoNan);
#endif
#undef BM_BINARY_SCALAR
Graph* CubeWithPow3(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, TensorShape({}));
rhs.flat<float>().setConstant(3);
test::graph::Binary(g, "Pow", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
Graph* CubeWithTwoMuls(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
auto* x = test::graph::Constant(g, lhs);
auto* inner = test::graph::Binary(g, "Mul", x, x);
test::graph::Binary(g, "Mul", x, inner);
return g;
}
Graph* CubeWithMulSquare(int num) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
lhs.flat<float>().setRandom();
auto* x = test::graph::Constant(g, lhs);
auto* inner = test::graph::Unary(g, "Square", x);
test::graph::Binary(g, "Mul", test::graph::Constant(g, lhs), inner);
return g;
}
#define BM_CUBE(DEVICE, Impl) \
void BM_##DEVICE##_Cube_##Impl(::testing::benchmark::State& state) { \
const int num = state.range(0); \
\
    test::Benchmark(#DEVICE, Impl(num), /*old_benchmark_api=*/false)         \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * num; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_Cube_##Impl) \
->UseRealTime() \
->Arg(1 << 12) \
->Arg(1 << 16) \
->Arg(1 << 20);
BM_CUBE(cpu, CubeWithPow3);
BM_CUBE(cpu, CubeWithTwoMuls);
BM_CUBE(cpu, CubeWithMulSquare);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_CUBE(gpu, CubeWithPow3);
BM_CUBE(gpu, CubeWithTwoMuls);
BM_CUBE(gpu, CubeWithMulSquare);
#endif
#undef BM_CUBE
template <class T>
Graph* BiasAdd(int rows, int cols, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor lhs(type, TensorShape({rows, cols}));
lhs.template flat<T>().setRandom();
TensorShape rhs_shape;
rhs_shape = TensorShape({cols});
Tensor rhs(type, rhs_shape);
rhs.template flat<T>().setRandom();
test::graph::Binary(g, "BiasAdd", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, R, C) \
void BM_##DEVICE##_##C_TYPE##_BiasAdd_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
    test::Benchmark(#DEVICE, BiasAdd<C_TYPE>(rows, cols, TF_TYPE),           \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_##DEVICE##_##C_TYPE##_BiasAdd_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BIAS_ADD_ALL(DEVICE, C_TYPE, TF_TYPE) \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 512, 2048); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 512, 4096); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 2048, 512); \
BM_BIAS_ADD(DEVICE, C_TYPE, TF_TYPE, 4096, 512);
using Eigen::half;
BM_BIAS_ADD_ALL(cpu, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_ALL(gpu, float, DT_FLOAT);
#endif
BM_BIAS_ADD_ALL(cpu, half, DT_HALF);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_ALL(gpu, half, DT_HALF);
#endif
#undef BM_BIAS_ADD_ALL
#undef BM_BIAS_ADD
template <class T>
Graph* BiasAddGrad(int rows, int cols, int channels, DataType type,
TensorFormat format) {
Graph* g = new Graph(OpRegistry::Global());
TensorShape lhs_shape;
if (format == FORMAT_NCHW) {
lhs_shape = TensorShape({channels, rows, cols});
} else {
lhs_shape = TensorShape({rows, cols, channels});
}
Tensor lhs(type, lhs_shape);
lhs.template flat<T>().setRandom();
Node* n;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BiasAddGrad")
.Attr("data_format", ToString(format))
.Input(test::graph::Constant(g, lhs), 0)
.Finalize(g, &n));
return g;
}
#define BM_BIAS_ADD_GRAD(DEVICE, FMT, C_TYPE, TF_TYPE, R, C, CH) \
void BM_##DEVICE##_##FMT##_##C_TYPE##_BiasAddGrad_R##R##_C##C##_CH##CH( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
const int channels = state.range(1); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
test::Benchmark( \
#DEVICE, \
BiasAddGrad<C_TYPE>(rows, cols, channels, TF_TYPE, FORMAT_##FMT), \
        /*old_benchmark_api=*/false)                                          \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols * channels; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_##DEVICE##_##FMT##_##C_TYPE##_BiasAddGrad_R##R##_C##C##_CH##CH) \
->ArgPair(RowsAndColsArg(R, C), CH);
#define BM_BIAS_ADD_GRAD_ALL(DEVICE, FORMAT, C_TYPE, TF_TYPE) \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 64, 64, 64); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 512, 512, 4); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 512, 512, 1); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 4096, 4096, 4); \
BM_BIAS_ADD_GRAD(DEVICE, FORMAT, C_TYPE, TF_TYPE, 4096, 4096, 1);
using Eigen::half;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NCHW, float, DT_FLOAT);
BM_BIAS_ADD_GRAD_ALL(gpu, NCHW, half, DT_HALF);
#endif
BM_BIAS_ADD_GRAD_ALL(cpu, NHWC, float, DT_FLOAT);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NHWC, float, DT_FLOAT);
#endif
BM_BIAS_ADD_GRAD_ALL(cpu, NHWC, half, DT_HALF);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BIAS_ADD_GRAD_ALL(gpu, NHWC, half, DT_HALF);
#endif
#undef BM_BIAS_ADD_GRAD_ALL
#undef BM_BIAS_ADD_GRAD
Graph* BcastAdd(int rows, int cols, int dim) {
Graph* g = new Graph(OpRegistry::Global());
TensorShape lhs_shape, rhs_shape;
if (dim == 0) {
lhs_shape = TensorShape({rows, cols});
rhs_shape = TensorShape({rows, 1});
} else if (dim == 1) {
lhs_shape = TensorShape({rows, cols});
rhs_shape = TensorShape({cols});
} else if (dim == 2) {
lhs_shape = TensorShape({rows, 1});
rhs_shape = TensorShape({1, cols});
} else {
lhs_shape = TensorShape({1, cols});
rhs_shape = TensorShape({rows, 1});
}
Tensor lhs(DT_FLOAT, lhs_shape);
lhs.flat<float>().setRandom();
Tensor rhs(DT_FLOAT, rhs_shape);
rhs.flat<float>().setRandom();
test::graph::Binary(g, "Add", test::graph::Constant(g, lhs),
test::graph::Constant(g, rhs));
return g;
}
#define BM_BCAST_ADD_ROW(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddRow_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
    test::Benchmark(#DEVICE, BcastAdd(rows, cols, 0),                        \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddRow_R##R##_C##C)->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_ROW_ALL(DEVICE) \
BM_BCAST_ADD_ROW(DEVICE, 512, 2048); \
BM_BCAST_ADD_ROW(DEVICE, 512, 4096); \
BM_BCAST_ADD_ROW(DEVICE, 2048, 512); \
BM_BCAST_ADD_ROW(DEVICE, 4096, 512);
BM_BCAST_ADD_ROW_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_ROW_ALL(gpu);
#endif
#undef BM_BCAST_ADD_ROW_ALL
#undef BM_BCAST_ADD_ROW
#define BM_BCAST_ADD_COL(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCol_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
    test::Benchmark(#DEVICE, BcastAdd(rows, cols, 1),                        \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
\
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCol_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_COL_ALL(DEVICE) \
BM_BCAST_ADD_COL(DEVICE, 512, 2048); \
BM_BCAST_ADD_COL(DEVICE, 512, 4096); \
BM_BCAST_ADD_COL(DEVICE, 2048, 512); \
BM_BCAST_ADD_COL(DEVICE, 4096, 512);
BM_BCAST_ADD_COL_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_COL_ALL(gpu);
#endif
#undef BM_BCAST_ADD_COL_ALL
#undef BM_BCAST_ADD_COL
#define BM_BCAST_ADD_CROSS_RC(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCrossRC_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
    test::Benchmark(#DEVICE, BcastAdd(rows, cols, 2),                        \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
\
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCrossRC_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_CROSS_RC_ALL(DEVICE) \
BM_BCAST_ADD_CROSS_RC(DEVICE, 512, 2048); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 512, 4096); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 2048, 512); \
BM_BCAST_ADD_CROSS_RC(DEVICE, 4096, 512);
BM_BCAST_ADD_CROSS_RC_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_CROSS_RC_ALL(gpu);
#endif
#undef BM_BCAST_ADD_CROSS_RC_ALL
#undef BM_BCAST_ADD_CROSS_RC
#define BM_BCAST_ADD_CROSS_CR(DEVICE, R, C) \
void BM_##DEVICE##_BcastAddCrossCR_R##R##_C##C( \
::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
const int rows = RowsFromArg(arg); \
const int cols = ColsFromArg(arg); \
    test::Benchmark(#DEVICE, BcastAdd(rows, cols, 3),                        \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * rows * cols; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_BcastAddCrossCR_R##R##_C##C) \
->UseRealTime() \
->Arg(RowsAndColsArg(R, C));
#define BM_BCAST_ADD_CROSS_CR_ALL(DEVICE) \
BM_BCAST_ADD_CROSS_CR(DEVICE, 512, 2048); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 512, 4096); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 2048, 512); \
BM_BCAST_ADD_CROSS_CR(DEVICE, 4096, 512);
BM_BCAST_ADD_CROSS_CR_ALL(cpu);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_BCAST_ADD_CROSS_CR_ALL(gpu);
#endif
#undef BM_BCAST_ADD_CROSS_CR_ALL
#undef BM_BCAST_ADD_CROSS_CR
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/cwise_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/cwise_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb05537e-6e3d-43eb-98f5-278684d16219 | cpp | tensorflow/tensorflow | literal | third_party/xla/xla/literal.cc | third_party/xla/xla/literal_test.cc | #include "xla/literal.h"
#include <algorithm>
#include <complex>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/core/bitmap.h"
#include "xla/tsl/util/byte_swap_array.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using absl::StrCat;
using primitive_util::NativeTypeOf;
constexpr bool kLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__;
void ConvertEndianShort(std::string* bytes) {
CHECK_EQ(bytes->size() % 2, 0);
for (int64_t i = 0, end = bytes->size(); i < end; i += 2) {
std::swap((*bytes)[i], (*bytes)[i + 1]);
}
}
void ConvertEndianShort(char* bytes, int64_t size) {
CHECK_EQ(size % 2, 0);
for (int64_t i = 0; i < size; i += 2) {
std::swap(bytes[i], bytes[i + 1]);
}
}
bool LiteralProtoHasValues(const LiteralProto& proto) {
return !proto.s2s().empty() || !proto.s4s().empty() || !proto.s8s().empty() ||
!proto.s16s().empty() || proto.s32s_size() || proto.s64s_size() ||
!proto.u2s().empty() || !proto.u4s().empty() || !proto.u8s().empty() ||
!proto.u16s().empty() || proto.u32s_size() || proto.u64s_size() ||
!proto.f8e5m2s().empty() || !proto.f8e4m3s().empty() ||
!proto.f8e4m3fns().empty() || !proto.f8e4m3b11fnuzs().empty() ||
!proto.f8e5m2fnuzs().empty() || !proto.f8e4m3fnuzs().empty() ||
!proto.f8e3m4s().empty() || !proto.f16s().empty() ||
!proto.bf16s().empty() || proto.f32s_size() || proto.f64s_size() ||
proto.c64s_size() || proto.c128s_size() || proto.preds_size() ||
proto.tuple_literals_size();
}
template <PrimitiveType kType>
const Shape& ScalarShapeImpl() {
static_assert(primitive_util::IsArrayType(kType),
"Not a valid type for a scalar.");
static const Shape* shape = [] {
auto shape = new Shape(kType, {}, {}, {});
shape->mutable_layout();
return shape;
}();
return *shape;
}
const Shape& ScalarShape(PrimitiveType type) {
return primitive_util::ArrayTypeSwitch<const Shape&>(
[&](auto primitive_type_constant) -> const Shape& {
return ScalarShapeImpl<primitive_type_constant>();
},
type);
}
const Shape& NilShape() {
static const Shape* shape = new Shape(TUPLE, {}, {}, {});
return *shape;
}
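// Returns a pointer to a statically allocated, interned shape if `shape`
// is the empty tuple or a scalar with the default dense layout, letting
// such literals share their shape storage; returns nullptr otherwise.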
const Shape* TryInternShape(const Shape& shape) {
if (shape.IsTuple() && shape.tuple_shapes_size() == 0) {
return &NilShape();
}
if (shape.IsArray() && shape.dimensions_size() == 0 && shape.is_static() &&
shape.layout().tiles_size() == 0 && shape.layout().memory_space() == 0) {
return &ScalarShape(shape.element_type());
}
return nullptr;
}
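// Iteration plan for copying between two dense arrays whose layouts may
// differ: the minor-most dimension of whichever shape has the larger minor
// extent becomes the inner loop (of length `minor_loop_size`); that side is
// walked contiguously while the other side advances by its precomputed
// dimension stride.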
struct StrideConfig {
StrideConfig(const Shape& source_shape, const Shape& dest_shape,
absl::Span<const int64_t> dimensions);
absl::Span<const int64_t> dimensions;
DimensionVector base;
DimensionVector step;
int64_t minor_dimension = 0;
int64_t dest_stride = 1;
int64_t source_stride = 1;
int64_t minor_loop_size = 1;
};
StrideConfig::StrideConfig(const Shape& source_shape, const Shape& dest_shape,
absl::Span<const int64_t> dimensions)
: dimensions(dimensions),
base(dimensions.size(), 0),
step(dimensions.size(), 1) {
if (!dimensions.empty()) {
if (dimensions[LayoutUtil::Minor(source_shape.layout(), 0)] >=
dimensions[LayoutUtil::Minor(dest_shape.layout(), 0)]) {
minor_dimension = LayoutUtil::Minor(source_shape.layout(), 0);
dest_stride = IndexUtil::GetDimensionStride(dest_shape, minor_dimension);
} else {
minor_dimension = LayoutUtil::Minor(dest_shape.layout(), 0);
source_stride =
IndexUtil::GetDimensionStride(source_shape, minor_dimension);
}
minor_loop_size = dimensions[minor_dimension];
step[minor_dimension] = minor_loop_size;
}
}
}
LiteralBase::~LiteralBase() = default;
const Shape& LiteralBase::shape() const { return root_piece().subshape(); }
const char* LiteralBase::Piece::buffer() const {
if (auto* r = std::get_if<DenseRep>(&rep_)) {
return r->data;
}
if (auto* r = std::get_if<DenseInlinedRep>(&rep_)) {
return r->data;
}
DCHECK(std::holds_alternative<TupleRep>(rep_) ||
std::holds_alternative<Uninitialized>(rep_));
return nullptr;
}
const LiteralBase::Piece& LiteralBase::piece(
const ShapeIndex& shape_index) const {
const Piece* piece = &root_piece();
for (const auto i : shape_index) {
DCHECK_GE(i, 0);
DCHECK_LT(i, piece->children_size());
piece = &piece->child(i);
}
return *piece;
}
std::ostream& operator<<(std::ostream& out, const Literal& literal) {
out << literal.ToString();
return out;
}
Shape* MutableLiteralBase::mutable_shape_do_not_use() {
const Shape* const_shape = shape_.get();
if (!shape_.OwnsPtr()) {
shape_ = MaybeOwningShapePtr(std::make_unique<Shape>(*shape_));
}
Shape* shape = shape_.get_mutable();
if (shape != const_shape) {
std::function<void(const Shape&, Piece*)> set_piece_shapes =
[&set_piece_shapes](const Shape& shape, Piece* piece) {
piece->set_subshape(&shape);
if (shape.IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
set_piece_shapes(subshape, &piece->child(i));
}
}
};
set_piece_shapes(*shape, &mutable_root_piece());
}
return shape;
}
Literal::Literal() : Literal(NilShape()) {}
Literal::Literal(const Shape& shape)
    : Literal(shape, /*allocate_arrays=*/true) {}
void Literal::SetShape(const Shape& shape) {
Shape shape_storage;
const Shape* shape_ptr = &shape;
if (shape.IsArray() && LayoutUtil::HasCustomElementSizeInBits(shape)) {
shape_storage = shape;
shape_storage.mutable_layout()->set_element_size_in_bits(0);
shape_ptr = &shape_storage;
}
if (const Shape* intered_shape_ptr = TryInternShape(*shape_ptr)) {
shape_ = intered_shape_ptr;
} else {
shape_ = std::make_unique<Shape>(*shape_ptr);
}
}
void Literal::SetPiece(const Shape& shape, Piece* piece, bool allocate_arrays,
ArrayValueState leaf_array_value_state) {
if (shape.IsTuple()) {
for (const Shape& subshape : shape.tuple_shapes()) {
Piece child_piece;
child_piece.set_subshape(&subshape);
SetPiece(subshape, &child_piece, allocate_arrays, leaf_array_value_state);
piece->emplace_back(std::move(child_piece));
}
} else if (shape.IsArray()) {
DCHECK(LayoutUtil::IsDenseArray(shape))
<< "literal array storage is currently only supported for dense "
"arrays: "
<< shape;
piece->set_array_value_state(leaf_array_value_state);
if (leaf_array_value_state == LiteralBase::ArrayValueState::kKnown &&
allocate_arrays) {
piece->AllocateBuffers();
}
}
}
Literal::Literal(const Shape& shape, bool allocate_arrays,
ArrayValueState leaf_array_value_state)
: MutableLiteralBase() {
SetShape(shape);
CHECK(leaf_array_value_state != ArrayValueState::kKnown ||
LayoutUtil::HasLayout(*shape_));
root_piece_.set_subshape(shape_.get());
CHECK(&root_piece_.subshape() == shape_.get());
SetPiece(*shape_, &root_piece_, allocate_arrays, leaf_array_value_state);
}
Literal::~Literal() { DeallocateBuffers(); }
void Literal::DeallocateBuffers() {
root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
piece->DeallocateBuffers();
});
}
Literal::Literal(Literal&& other) : MutableLiteralBase() {
*this = std::move(other);
}
Literal& Literal::operator=(Literal&& other) {
DCHECK(&other.root_piece_.subshape() == other.shape_.get());
using std::swap;
swap(shape_, other.shape_);
swap(root_piece_, other.root_piece_);
DCHECK(&root_piece_.subshape() == shape_.get());
return *this;
}
Literal LiteralBase::CreateFromShape(const Shape& shape) {
Literal literal(shape);
literal.root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
if (piece->subshape().IsArray()) {
memset(piece->untyped_data(), 0, piece->size_bytes_dense());
}
});
return literal;
}
Literal LiteralBase::CreateFromShapeWithUnknownLeafArrays(const Shape& shape) {
  Literal literal(shape, /*allocate_arrays=*/false, ArrayValueState::kUnknown);
return literal;
}
Literal LiteralBase::CreateFromShapeWithUndeterminedLeafArrays(
const Shape& shape) {
  Literal literal(shape, /*allocate_arrays=*/false,
                  ArrayValueState::kUndetermined);
return literal;
}
int32_t LiteralBase::GetDynamicSize(int64_t dim_index) const {
return GetDynamicSize(dim_index, {});
}
int32_t LiteralBase::GetDynamicSize(int64_t dim_index,
const ShapeIndex& shape_index) const {
return piece(shape_index).GetDynamicSize(dim_index);
}
std::optional<int64_t> LiteralBase::GetFirstInteger() const {
if (!primitive_util::IsIntegralType(shape().element_type())) {
return std::nullopt;
}
return primitive_util::IntegralTypeSwitch<std::optional<int64_t>>(
[&](auto primitive_type_constant) -> std::optional<int64_t> {
using NativeT = NativeTypeOf<primitive_type_constant>;
auto first_element = GetFirstElement<NativeT>();
if constexpr (std::is_same_v<NativeT, uint64_t>) {
int64_t v = static_cast<int64_t>(first_element);
if (v < 0) {
return std::nullopt;
}
}
return first_element;
},
shape().element_type());
}
void LiteralBase::BuildPieceSubtree(const Shape& shape, Piece* piece) {
CHECK(shape.IsTuple());
for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
Piece child_piece;
child_piece.set_subshape(&subshape);
if (subshape.IsTuple()) {
BuildPieceSubtree(subshape, &child_piece);
}
piece->emplace_back(std::move(child_piece));
}
}
absl::Status LiteralBase::SerializeToString(std::string* output) const {
ShapeProto shape_proto = shape().ToProto();
TF_ASSIGN_OR_RETURN(int64_t size,
ShapeUtil::SerializedSizeWithProto(shape(), shape_proto));
output->resize(size);
return SerializeWithShapeProto(shape_proto, output->data());
}
absl::StatusOr<std::string> LiteralBase::SerializeAsString() const {
std::string result;
TF_RETURN_IF_ERROR(SerializeToString(&result));
return std::move(result);
}
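// Copies the rectangular region `copy_size`, anchored at `src_base` in
// `src_literal` and at `dest_base` in this literal. StrideConfig picks a
// minor-most dimension so that the inner loop is a single StridedCopy of
// `minor_loop_size` elements per visited index.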
template <typename NativeT>
absl::Status MutableLiteralBase::CopySliceFromInternal(
const LiteralBase& src_literal, absl::Span<const int64_t> src_base,
absl::Span<const int64_t> dest_base, absl::Span<const int64_t> copy_size) {
auto linear_index = [](const Shape& shape,
absl::Span<const int64_t> multi_index) {
return IndexUtil::MultidimensionalIndexToLinearIndex(shape, multi_index);
};
NativeT* dest_data = this->data<NativeT>().data();
const NativeT* src_data = src_literal.data<NativeT>().data();
if (src_literal.shape().rank() == 0 || shape().rank() == 0) {
TF_RET_CHECK(copy_size.empty());
dest_data[linear_index(shape(), dest_base)] =
src_data[linear_index(src_literal.shape(), src_base)];
} else if (!ShapeUtil::IsZeroElementArray(shape()) &&
!ShapeUtil::IsZeroElementArray(src_literal.shape()) &&
absl::c_none_of(copy_size, [](auto d) { return d == 0; })) {
TF_RET_CHECK(src_base.size() == dest_base.size());
TF_RET_CHECK(src_base.size() == copy_size.size());
DimensionVector src_indexes(src_base.size(), 0);
DimensionVector dest_indexes(dest_base.size(), 0);
StrideConfig stride_config(src_literal.shape(), shape(), copy_size);
auto copy_proc = [&](absl::Span<const int64_t> indexes) {
std::transform(indexes.begin(), indexes.end(), src_base.begin(),
src_indexes.begin(), std::plus<int64_t>());
std::transform(indexes.begin(), indexes.end(), dest_base.begin(),
dest_indexes.begin(), std::plus<int64_t>());
int64_t src_index = linear_index(src_literal.shape(), src_indexes);
int64_t dest_index = linear_index(shape(), dest_indexes);
StridedCopy(dest_data + dest_index, stride_config.dest_stride,
src_data + src_index, stride_config.source_stride,
stride_config.minor_loop_size);
return true;
};
ShapeUtil::ForEachIndex(src_literal.shape(), stride_config.base,
stride_config.dimensions, stride_config.step,
copy_proc);
}
return absl::OkStatus();
}
void MutableLiteralBase::CopyElementFrom(const LiteralSlice& src_literal,
absl::Span<const int64_t> src_index,
absl::Span<const int64_t> dest_index) {
DCHECK(LayoutUtil::IsDenseArray(shape()));
DCHECK_EQ(shape().element_type(), src_literal.shape().element_type());
const int64_t src_linear_index =
IndexUtil::MultidimensionalIndexToLinearIndex(src_literal.shape(),
src_index);
const int64_t dest_linear_index =
IndexUtil::MultidimensionalIndexToLinearIndex(shape(), dest_index);
const int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
char* dest_address =
static_cast<char*>(untyped_data()) + dest_linear_index * primitive_size;
const char* source_address =
static_cast<const char*>(src_literal.untyped_data()) +
src_linear_index * primitive_size;
if (dest_address != source_address) {
memcpy(dest_address, source_address, primitive_size);
}
}
absl::StatusOr<Literal> MutableLiteralBase::CreateFromProto(
const LiteralProto& proto, bool prohibit_empty_literal) {
if (!proto.has_shape()) {
return InvalidArgument("LiteralProto has no shape");
}
Shape shape(proto.shape());
if (ShapeUtil::HasPrimitiveType(shape, OPAQUE_TYPE)) {
return InvalidArgument(
"Literal shape cannot include OPAQUE_TYPE sub-shape");
}
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("LiteralProto has no layout");
}
if (LayoutUtil::IsSparseArray(shape)) {
return Unimplemented("Sparse literals are not supported");
}
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
Literal literal(shape);
TF_RETURN_IF_ERROR(literal.root_piece_.ForEachMutableSubpieceWithStatus(
[&](const ShapeIndex& index, Piece* piece) -> absl::Status {
const LiteralProto* proto_element = &proto;
for (int64_t i : index) {
CHECK(i < proto_element->tuple_literals_size());
proto_element = &proto_element->tuple_literals(i);
}
if (piece->subshape().IsTuple()) {
if (proto_element->tuple_literals_size() !=
ShapeUtil::TupleElementCount(piece->subshape())) {
return InvalidArgument(
"Expected %d tuple elements in LiteralProto, has %d",
ShapeUtil::TupleElementCount(piece->subshape()),
proto_element->tuple_literals_size());
}
return absl::OkStatus();
}
if (piece->subshape().element_type() == TOKEN) {
return absl::OkStatus();
}
CHECK(piece->subshape().IsArray());
if (prohibit_empty_literal || LiteralProtoHasValues(*proto_element)) {
TF_RETURN_IF_ERROR(piece->CopyFromProto(*proto_element));
}
return absl::OkStatus();
}));
return std::move(literal);
}
Literal Literal::SubLiteral(ShapeIndexView shape_index) {
if (!shape_index.empty()) {
auto decomposed = this->DecomposeTuple();
return decomposed.at(shape_index.front())
.SubLiteral(shape_index.subspan(1));
} else {
return std::move(*this);
}
}
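// Moves each top-level tuple element out into its own Literal, stealing the
// element buffers rather than copying them, and leaves this literal as the
// empty tuple.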
std::vector<Literal> Literal::DecomposeTuple() {
CHECK(shape().IsTuple());
std::vector<Literal> elements;
const auto tuple_element_count = ShapeUtil::TupleElementCount(shape());
elements.reserve(tuple_element_count);
for (int i = 0; i < tuple_element_count; ++i) {
    elements.push_back(Literal(ShapeUtil::GetSubshape(shape(), {i}),
                               /*allocate_arrays=*/false));
Literal& element = elements.back();
element.root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* dest_piece) {
if (dest_piece->subshape().IsTuple()) {
return;
}
ShapeIndex src_index = {i};
for (int64_t j : index) {
src_index.push_back(j);
}
Piece& src_piece = piece(src_index);
dest_piece->MoveDataFrom(src_piece);
});
}
*this = Literal();
return elements;
}
namespace {
template <typename NativeT>
void CopyElementsBetween(absl::Span<NativeT> dest,
absl::Span<const NativeT> src, const Shape& dest_shape,
const Shape& src_shape) {
DCHECK(LayoutUtil::IsDenseArray(dest_shape));
DCHECK(LayoutUtil::IsDenseArray(src_shape));
DCHECK(ShapeUtil::Compatible(dest_shape, src_shape));
if (ShapeUtil::IsZeroElementArray(dest_shape)) {
return;
}
std::vector<int64_t> index(dest_shape.rank());
do {
dest[IndexUtil::MultidimensionalIndexToLinearIndex(dest_shape, index)] =
src[IndexUtil::MultidimensionalIndexToLinearIndex(src_shape, index)];
} while (IndexUtil::BumpIndices(dest_shape, absl::MakeSpan(index)));
}
}
int32_t LiteralBase::Piece::GetDynamicSize(int64_t dim_index) const {
CHECK(LayoutUtil::IsDenseArray(subshape()));
if (!subshape_->is_dynamic_dimension(dim_index)) {
return subshape_->dimensions(dim_index);
}
return dynamic_size_buffer()[dim_index];
}
void LiteralBase::Piece::SetDynamicSize(int64_t dim_index, int32_t size) {
CHECK(LayoutUtil::IsDenseArray(subshape()));
CHECK(subshape_->is_dynamic_dimension(dim_index));
dynamic_size_buffer()[dim_index] = size;
}
void LiteralBase::Piece::AllocateBuffers() {
const int64_t bytes = total_bytes_dense();
if (bytes > kMaxInlinedBytes) {
CHECK_EQ(buffer(), nullptr);
rep_.emplace<DenseRep>();
set_buffer(
static_cast<char*>(tsl::port::AlignedMalloc(bytes, kMinimumAlignment)));
} else {
rep_.emplace<DenseInlinedRep>();
}
}
void LiteralBase::Piece::DeallocateBuffers() {
if (auto* array_rep = GetDenseRep()) {
tsl::port::AlignedFree(array_rep->data);
rep_.emplace<Uninitialized>();
}
}
template <typename NativeT>
void LiteralBase::Piece::CopyElementsWithDynamicBound(
const LiteralBase::Piece& src) {
auto& dest_shape = subshape();
auto& src_shape = src.subshape();
CHECK(dest_shape.is_static() || src_shape.is_static());
auto& bound_shape = dest_shape.is_static() ? src_shape : dest_shape;
if (ShapeUtil::IsZeroElementArray(dest_shape)) {
return;
}
if (dest_shape.rank() == 1) {
int64_t count = std::min(GetDynamicSize(0), src.GetDynamicSize(0));
std::copy_n(src.data<NativeT>().begin(), count, data<NativeT>().begin());
return;
}
std::vector<int64_t> index(dest_shape.rank());
do {
bool out_of_bound = false;
for (int64_t i = 0; i < index.size(); ++i) {
if (index[i] >= GetDynamicSize(i) || index[i] >= src.GetDynamicSize(i)) {
out_of_bound = true;
}
}
if (out_of_bound) {
continue;
}
data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(dest_shape,
index)] =
src.data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
src_shape, index)];
} while (IndexUtil::BumpIndices(bound_shape, absl::MakeSpan(index)));
}
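// Copies data (and dynamic sizes, for dynamic shapes) from `src` into this
// piece. An unknown or undetermined source only transfers its array value
// state; otherwise, shapes that are byte-for-byte equal take a flat memcpy,
// and anything else is copied element by element, optionally clamped to the
// dynamic bounds when `only_dynamic_bound` is set.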
absl::Status LiteralBase::Piece::CopyFrom(const LiteralBase::Piece& src,
bool only_dynamic_bound) {
CHECK(subshape_ != nullptr);
CHECK(src.subshape_ != nullptr);
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK(LayoutUtil::IsDenseArray(src.subshape()))
<< __func__ << " is only supported for dense arrays: " << src.subshape();
if (!only_dynamic_bound) {
CHECK(ShapeUtil::Compatible(subshape(), src.subshape()));
}
if (src.array_value_state_ == ArrayValueState::kUnknown ||
src.array_value_state_ == ArrayValueState::kUndetermined) {
if (array_value_state_ == ArrayValueState::kKnown) {
DeallocateBuffers();
}
array_value_state_ = src.array_value_state_;
return absl::OkStatus();
} else {
CHECK(src.array_value_state_ == ArrayValueState::kKnown);
if (array_value_state_ == ArrayValueState::kUndetermined ||
array_value_state_ == ArrayValueState::kUnknown) {
AllocateBuffers();
}
array_value_state_ = src.array_value_state_;
}
if (ShapeUtil::Equal(subshape(), src.subshape())) {
memcpy(buffer(), src.buffer(), src.size_bytes_dense());
} else {
std::vector<int64_t> origin(subshape().rank(), 0);
primitive_util::ArrayTypeSwitch<void>(
[&](auto primitive_type_constant) {
using NativeT = NativeTypeOf<primitive_type_constant>;
if (only_dynamic_bound) {
CopyElementsWithDynamicBound<NativeT>(src);
} else {
CopyElementsBetween<NativeT>(this->data<NativeT>(),
src.data<NativeT>(), subshape(),
src.subshape());
}
},
subshape().element_type());
}
DCHECK_EQ(dynamic_size_buffer_bytes(), src.dynamic_size_buffer_bytes());
if (subshape().is_dynamic() && src.subshape().is_dynamic()) {
memcpy(dynamic_size_buffer(), src.dynamic_size_buffer(),
src.dynamic_size_buffer_bytes());
}
return absl::OkStatus();
}
void MutableLiteralBase::SetDynamicSize(int64_t dim_index, int32_t size) {
return SetDynamicSize(dim_index, {}, size);
}
void MutableLiteralBase::SetDynamicSize(int64_t dim_index,
const ShapeIndex& shape_index,
int32_t size) {
Shape* subshape =
ShapeUtil::GetMutableSubshape(mutable_shape_do_not_use(), shape_index);
CHECK(LayoutUtil::IsDenseArray(*subshape))
<< __func__ << " is only supported for dense arrays: " << *subshape;
CHECK_GE(subshape->dimensions(dim_index), size);
subshape->set_dynamic_dimension(dim_index, true);
CHECK_EQ(&piece(shape_index).subshape(), subshape);
piece(shape_index).SetDynamicSize(dim_index, size);
}
absl::Status MutableLiteralBase::CopyFrom(const LiteralSlice& src_literal,
const ShapeIndex& dest_shape_index,
const ShapeIndex& src_shape_index,
bool only_dynamic_bound) {
const Shape& dest_subshape =
ShapeUtil::GetSubshape(shape(), dest_shape_index);
const Shape& src_subshape =
ShapeUtil::GetSubshape(src_literal.shape(), src_shape_index);
if (only_dynamic_bound) {
auto& bound_shape =
dest_subshape.is_static() ? src_subshape : dest_subshape;
auto& compact_shape =
dest_subshape.is_static() ? dest_subshape : src_subshape;
CHECK(ShapeUtil::DynamicShapeIsCompatible(compact_shape, bound_shape))
<< compact_shape.ToString() << " vs " << bound_shape.ToString();
} else {
if (!ShapeUtil::Compatible(dest_subshape, src_subshape)) {
return InvalidArgument(
"Destination subshape incompatible with source subshape: %s vs %s",
ShapeUtil::HumanString(dest_subshape),
ShapeUtil::HumanString(src_subshape));
}
}
return mutable_root_piece().ForEachMutableSubpieceWithStatus(
[&](const ShapeIndex& index, Piece* piece) {
if (!piece->subshape().IsArray()) {
return absl::OkStatus();
}
bool in_subtree_to_copy = true;
for (int i = 0; i < dest_shape_index.size(); ++i) {
if (index[i] != dest_shape_index[i]) {
in_subtree_to_copy = false;
break;
}
}
if (!in_subtree_to_copy) {
return absl::OkStatus();
}
ShapeIndex src_piece_index = src_shape_index;
for (int64_t i = dest_shape_index.size(), end = index.size(); i < end;
++i) {
src_piece_index.push_back(index[i]);
}
TF_RETURN_IF_ERROR(
piece->CopyFrom(src_literal.piece(src_piece_index),
only_dynamic_bound));
return absl::OkStatus();
});
}
absl::Status Literal::MoveFrom(Literal&& src_literal,
const ShapeIndex& dest_shape_index) {
const Shape& dest_subshape =
ShapeUtil::GetSubshape(shape(), dest_shape_index);
if (!ShapeUtil::Equal(dest_subshape, src_literal.shape())) {
return InvalidArgument(
"Destination subshape not equal to source shape: %s vs %s",
ShapeUtil::HumanString(dest_subshape),
ShapeUtil::HumanString(src_literal.shape()));
}
src_literal.root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& src_index, Piece* src_piece) {
if (!src_piece->subshape().IsArray()) {
return;
}
ShapeIndex dest_index = dest_shape_index;
for (int64_t i : src_index) {
dest_index.push_back(i);
}
Piece& dest_piece = piece(dest_index);
dest_piece.DeallocateBuffers();
dest_piece.MoveDataFrom(*src_piece);
});
src_literal.shape_ = MaybeOwningShapePtr(&NilShape());
src_literal.root_piece_ = Piece();
src_literal.root_piece_.set_subshape(src_literal.shape_.get());
return absl::OkStatus();
}
absl::Status MutableLiteralBase::CopySliceFrom(
const LiteralSlice& src_literal, absl::Span<const int64_t> src_base,
absl::Span<const int64_t> dest_base, absl::Span<const int64_t> copy_size) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(shape())) << shape();
TF_RET_CHECK(LayoutUtil::IsDenseArray(src_literal.shape()))
<< src_literal.shape();
TF_RET_CHECK(ShapeUtil::SameElementType(src_literal.shape(), shape()));
TF_RET_CHECK(src_literal.shape().rank() == src_base.size());
TF_RET_CHECK(shape().rank() == dest_base.size());
return primitive_util::ArrayTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
using NativeT = NativeTypeOf<primitive_type_constant>;
return CopySliceFromInternal<NativeT>(src_literal, src_base, dest_base,
copy_size);
},
shape().element_type());
}
void MutableLiteralBase::PopulateR1(const tsl::core::Bitmap& values) {
CHECK(shape().IsArray());
CHECK_EQ(shape().rank(), 1);
CHECK_EQ(element_count(), values.bits());
CHECK_EQ(shape().element_type(), PRED);
for (int64_t i = 0; i < static_cast<int64_t>(values.bits()); ++i) {
Set({i}, values.get(i));
}
}
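// Writes every element through `populator`, scanning the minor-most
// dimension in the inner loop. In parallel mode, rank-1 shapes are
// re-chunked so that each worker receives a contiguous range of roughly
// num_elements / thread_count elements.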
void MutableLiteralBase::PopulateInplaceInternal(
absl::FunctionRef<void(void*, absl::Span<const int64_t>, int)> populator,
bool parallel) {
const Shape& this_shape = shape();
const int64_t rank = this_shape.rank();
DCHECK(LayoutUtil::IsDenseArray(this_shape));
char* const dest_base = static_cast<char*>(untyped_data());
if (rank > 0) {
StrideConfig stride_config(this_shape, this_shape, this_shape.dimensions());
const int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
const int64_t num_elements = ShapeUtil::ElementsIn(shape());
if (parallel && this_shape.rank() == 1) {
const int64_t thread_count =
ShapeUtil::GetForEachIndexParallelThreadCount();
stride_config.dest_stride = stride_config.minor_loop_size =
num_elements > 32 ? std::max<int64_t>(num_elements / thread_count, 1)
: num_elements;
stride_config.step = {stride_config.minor_loop_size};
}
auto init_function = [&](absl::Span<const int64_t> indexes,
int thread_id) -> absl::StatusOr<bool> {
const int64_t index =
IndexUtil::MultidimensionalIndexToLinearIndex(shape(), indexes);
DimensionVector minor_scan_indexes(rank, 0);
std::copy(indexes.begin(), indexes.end(), minor_scan_indexes.begin());
char* dest_ptr = dest_base + index * primitive_size;
char* const dest_end =
dest_base +
std::min(index + stride_config.minor_loop_size, num_elements) *
primitive_size;
while (dest_ptr < dest_end) {
populator(dest_ptr, minor_scan_indexes, thread_id);
++minor_scan_indexes[stride_config.minor_dimension];
dest_ptr += primitive_size;
}
return true;
};
if (parallel) {
ShapeUtil::ForEachIndexParallel(this_shape, stride_config.base,
stride_config.dimensions,
stride_config.step, init_function);
} else {
ShapeUtil::ForEachIndex(
this_shape, stride_config.base, stride_config.dimensions,
stride_config.step,
[&init_function](
absl::Span<const int64_t> indexes) -> absl::StatusOr<bool> {
              auto result_ignored = init_function(indexes, /*thread_id=*/-1);
return true;
});
}
} else {
    populator(dest_base, {}, /*thread_id=*/-1);
}
}
absl::Status MutableLiteralBase::PopulateInplace(
absl::FunctionRef<void(void*, absl::Span<const int64_t>)> populator) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
  PopulateInplaceInternal(
      [&](void* dest, absl::Span<const int64_t> indexes, int /*thread_id*/) {
        return populator(dest, indexes);
      },
      /*parallel=*/false);
return absl::OkStatus();
}
absl::Status MutableLiteralBase::PopulateInplaceParallel(
absl::FunctionRef<void(void*, absl::Span<const int64_t>, int)> populator) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
  PopulateInplaceInternal(populator,
                          /*parallel=*/element_count() > 32);
return absl::OkStatus();
}
Literal LiteralBase::Relayout(const Layout& new_layout,
const ShapeIndex& shape_index) const {
Shape new_shape = shape();
Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);
TF_CHECK_OK(LayoutUtil::ValidateLayoutForShape(new_layout, *subshape));
*subshape->mutable_layout() = new_layout;
if (subshape->layout().element_size_in_bits() == 4) {
subshape->mutable_layout()->set_element_size_in_bits(0);
}
Literal result(new_shape);
TF_CHECK_OK(result.CopyFrom(*this));
return result;
}
Literal LiteralBase::Relayout(const Shape& shape_with_layout) const {
CHECK(ShapeUtil::Compatible(shape_with_layout, shape()))
<< "Given shape_with_layout " << ShapeUtil::HumanString(shape_with_layout)
<< " not compatible with literal shape "
<< ShapeUtil::HumanString(shape());
Literal result = CreateFromShape(shape_with_layout);
ShapeUtil::ForEachSubshape(
result.shape(),
[this, &result](const Shape& subshape, const ShapeIndex& index) {
if (subshape.IsArray()) {
          TF_CHECK_OK(result.CopyFrom(*this,
                                      /*dest_shape_index=*/index,
                                      /*src_shape_index=*/index));
}
});
return result;
}
Literal LiteralBase::ToBoundedDynamic(const Shape& bounded_shape) const {
CHECK(bounded_shape.is_dynamic());
Literal result(bounded_shape);
ShapeUtil::ForEachSubshape(
shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
if (bounded_shape.is_dynamic_dimension(i)) {
result.SetDynamicSize(i, subshape.dimensions(i));
}
}
});
  TF_CHECK_OK(result.CopyFrom(*this, {}, {}, /*only_dynamic_bound=*/true));
return result;
}
Literal LiteralBase::ToStatic() const {
Shape new_shape = shape();
ShapeUtil::ForEachMutableSubshape(
&new_shape, [this](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
for (int64_t i = 0; i < subshape->rank(); ++i) {
if (!subshape->is_dynamic_dimension(i)) continue;
subshape->set_dynamic_dimension(i, false);
subshape->set_dimensions(i, GetDynamicSize(i, index));
}
});
Literal result(new_shape);
  TF_CHECK_OK(result.CopyFrom(*this, {}, {}, /*only_dynamic_bound=*/true));
return result;
}
namespace {
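// Broadcasts `src` into `result_shape` for a compile-time element size, so
// the per-element copy is a fixed-size memcpy. `dimensions` maps each source
// dimension to its position in the result; each output index is projected
// back onto the source to locate the element to copy.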
template <int64_t PRIMITIVE_SIZE>
absl::StatusOr<Literal> BroadcastHelper(const LiteralBase& src,
const Shape& src_shape,
const Shape& result_shape,
absl::Span<const int64_t> dimensions) {
for (int64_t i = 0, end = dimensions.size(); i < end; i++) {
TF_RET_CHECK(src_shape.dimensions(i) ==
result_shape.dimensions(dimensions[i]));
}
TF_RET_CHECK(result_shape.element_type() == src_shape.element_type());
Literal result(result_shape);
if (src_shape.is_dynamic()) {
for (int64_t i = 0; i < dimensions.size(); ++i) {
if (src_shape.is_dynamic_dimension(i)) {
int64_t dynamic_size = src.GetDynamicSize(i);
result.SetDynamicSize(dimensions[i], dynamic_size);
}
}
}
int src_shape_dims = src_shape.dimensions_size();
std::vector<int64_t> scratch_source_index(src_shape_dims);
absl::Span<int64_t> scratch_source_span(scratch_source_index);
int64_t* scratch_source_array = scratch_source_span.data();
const char* source_data = static_cast<const char*>(src.untyped_data());
char* dest_data = static_cast<char*>(result.untyped_data());
auto src_minor_to_major = LayoutUtil::MinorToMajor(src_shape);
auto result_minor_to_major = LayoutUtil::MinorToMajor(result_shape);
ShapeUtil::ForEachIndexNoStatus(
result_shape, [&](absl::Span<const int64_t> output_index) {
int64_t dest_index = IndexUtil::MultidimensionalIndexToLinearIndex(
result_shape, result_minor_to_major, output_index);
int64_t source_index;
for (int64_t i = 0, end = dimensions.size(); i < end; ++i) {
scratch_source_array[i] = output_index[dimensions[i]];
}
if (src_shape_dims == 1) {
source_index = scratch_source_array[0];
DCHECK_EQ(source_index,
IndexUtil::MultidimensionalIndexToLinearIndex(
src_shape, src_minor_to_major, scratch_source_span));
} else {
source_index = IndexUtil::MultidimensionalIndexToLinearIndex(
src_shape, src_minor_to_major, scratch_source_span);
}
memcpy(dest_data + PRIMITIVE_SIZE * dest_index,
source_data + PRIMITIVE_SIZE * source_index, PRIMITIVE_SIZE);
return true;
});
return std::move(result);
}
}
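// Example (illustrative): broadcasting s32[2] into s32[2,3] along dim 0,
// i.e. each source element fills one result row:
//   Literal v = LiteralUtil::CreateR1<int32_t>({1, 2});
//   Literal b = v.Broadcast(ShapeUtil::MakeShape(S32, {2, 3}),
//                           /*dimensions=*/{0}).value();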
absl::StatusOr<Literal> LiteralBase::Broadcast(
const Shape& result_shape, absl::Span<const int64_t> dimensions) const {
const LiteralBase& src = *this;
const Shape& src_shape = shape();
if (!src_shape.IsArray()) {
return InvalidArgument("Broadcast only supports arrays.");
}
const int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(src_shape.element_type());
switch (primitive_size) {
case 0:
return BroadcastHelper<0>(src, src_shape, result_shape, dimensions);
case 1:
return BroadcastHelper<1>(src, src_shape, result_shape, dimensions);
case 2:
return BroadcastHelper<2>(src, src_shape, result_shape, dimensions);
case 4:
return BroadcastHelper<4>(src, src_shape, result_shape, dimensions);
case 8:
return BroadcastHelper<8>(src, src_shape, result_shape, dimensions);
case 16:
return BroadcastHelper<16>(src, src_shape, result_shape, dimensions);
    default:
      LOG(FATAL) << "Unhandled primitive size " << primitive_size;
      return InvalidArgument("Unhandled primitive size");
}
}
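// Example (illustrative): reshaping 2x3 -> 3x2 keeps the row-major element
// order (1, 2, 3, 4, 5, 6):
//   Literal m = LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}});
//   Literal r = m.Reshape({3, 2}).value();  // {{1, 2}, {3, 4}, {5, 6}}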
absl::StatusOr<Literal> LiteralBase::Reshape(
absl::Span<const int64_t> dimensions) const {
if (!LayoutUtil::IsDenseArray(shape())) {
return InvalidArgument("Reshape is only supported for dense arrays.");
}
if (shape().is_dynamic()) {
return Unimplemented("Dynamic reshape is not implemented.");
}
Literal output;
if (!LayoutUtil::IsMonotonicWithDim0Major(shape().layout())) {
output = Relayout(LayoutUtil::GetDefaultLayoutForRank(shape().rank()));
} else {
output = Clone();
}
*output.mutable_shape_do_not_use() =
ShapeUtil::MakeShape(shape().element_type(), dimensions);
int64_t elements_before = ShapeUtil::ElementsIn(shape());
int64_t elements_after = ShapeUtil::ElementsIn(output.shape());
if (elements_before != elements_after) {
return InvalidArgument(
"Shapes before and after Literal::Reshape have different numbers "
"of elements: %s vs %s.",
ShapeUtil::HumanString(shape()),
ShapeUtil::HumanString(output.shape()));
}
return std::move(output);
}
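// Transpose never shuffles element data: it permutes the logical dimensions
// and folds the inverse permutation into minor_to_major, so the physical
// byte order is unchanged and a single memcpy suffices.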
Literal LiteralBase::Transpose(absl::Span<const int64_t> permutation) const {
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
CHECK(shape().rank() == permutation.size() && IsPermutation(permutation))
<< "Given permutation is not a permutation of dimension numbers";
Shape permuted_shape = ShapeUtil::PermuteDimensions(permutation, shape());
std::vector<int64_t> inverse_permutation = InversePermutation(permutation);
CHECK(LayoutUtil::IsDenseArray(permuted_shape));
Layout* layout = permuted_shape.mutable_layout();
layout->clear_minor_to_major();
for (auto index : LayoutUtil::MinorToMajor(shape())) {
layout->add_minor_to_major(inverse_permutation[index]);
}
Literal new_literal(permuted_shape);
if (shape().is_dynamic()) {
for (int64_t i = 0; i < shape().rank(); i++) {
if (shape().is_dynamic_dimension(i)) {
new_literal.SetDynamicSize(inverse_permutation[i], GetDynamicSize(i));
}
}
}
DCHECK_EQ(ShapeUtil::ByteSizeOf(new_literal.shape()),
ShapeUtil::ByteSizeOf(shape()));
std::memcpy(new_literal.untyped_data(), untyped_data(), size_bytes());
return new_literal;
}
namespace {
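// SliceInternal fills the result by reading each destination index offset by
// start_indices from the source, then clamps any dynamic sizes that the
// slice window truncates.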
template <typename NativeT>
void SliceInternal(const LiteralBase& src_literal,
absl::Span<const int64_t> start_indices,
Literal& result_literal) {
const Shape& result_shape = result_literal.shape();
DimensionVector new_indices(result_shape.rank());
TF_CHECK_OK(
result_literal.Populate<NativeT>([&](absl::Span<const int64_t> indices) {
for (int64_t i = 0; i < result_shape.rank(); ++i) {
new_indices[i] = indices[i] + start_indices[i];
}
return src_literal.Get<NativeT>(new_indices);
}));
for (int64_t dnum = 0; dnum < src_literal.shape().rank(); ++dnum) {
if (src_literal.shape().is_dynamic_dimension(dnum)) {
int64_t dynamic_size =
src_literal.GetDynamicSize(dnum) - start_indices[dnum];
CHECK_GE(dynamic_size, 0) << src_literal.GetDynamicSize(dnum);
dynamic_size = std::min(dynamic_size, result_shape.dimensions(dnum));
result_literal.SetDynamicSize(dnum, dynamic_size);
}
}
}
}
Literal LiteralBase::Slice(absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices) const {
CHECK(shape().IsArray()) << "tuple is not supported for slice";
DimensionVector result_dimensions;
for (int64_t dnum = 0; dnum < shape().rank(); ++dnum) {
CHECK_GE(start_indices[dnum], 0);
CHECK_LE(limit_indices[dnum], shape().dimensions(dnum))
<< "dnum = " << dnum;
int64_t dimension = limit_indices[dnum] - start_indices[dnum];
CHECK_GE(dimension, 0) << "dnum = " << dnum;
result_dimensions.push_back(dimension);
}
auto result_shape = ShapeUtil::MakeShapeWithDenseLayout(
shape().element_type(), result_dimensions,
LayoutUtil::MinorToMajor(shape()));
ShapeUtil::CopyDynamicDimensions(&result_shape, shape());
Literal result_literal(result_shape);
primitive_util::ArrayTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
using NativeT = NativeTypeOf<primitive_type_constant>;
return SliceInternal<NativeT>(*this, start_indices, result_literal);
},
result_shape.element_type());
return result_literal;
}
Literal LiteralBase::Clone() const {
Literal result(shape());
TF_CHECK_OK(result.CopyFrom(*this));
return result;
}
std::unique_ptr<Literal> LiteralBase::CloneToUnique() const {
auto result = std::make_unique<Literal>(shape());
TF_CHECK_OK(result->CopyFrom(*this));
return result;
}
bool LiteralBase::IsDetermined(const ShapeIndex& shape_index) const {
return piece(shape_index).IsDetermined();
}
bool LiteralBase::IsKnown(const ShapeIndex& shape_index) const {
return piece(shape_index).IsKnown();
}
std::string LiteralBase::GetAsString(absl::Span<const int64_t> multi_index,
const ShapeIndex& shape_index) const {
const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);
CHECK(LayoutUtil::IsDenseArray(subshape));
return primitive_util::ArrayTypeSwitch<std::string>(
[&](auto primitive_type_constant) -> std::string {
using NativeT = NativeTypeOf<primitive_type_constant>;
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
return StrCat(Get<NativeT>(multi_index, shape_index));
}
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
return RoundTripFpToString(Get<NativeT>(multi_index, shape_index));
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
NativeT c = Get<NativeT>(multi_index, shape_index);
return StrCat("(", RoundTripFpToString(c.real()), ", ",
RoundTripFpToString(c.imag()), ")");
}
if constexpr (primitive_type_constant == PRED) {
return Get<bool>(multi_index, shape_index) ? "true" : "false";
}
LOG(FATAL) << PrimitiveType_Name(subshape.element_type());
},
subshape.element_type());
}
std::optional<int64_t> LiteralBase::GetIntegralAsS64(
absl::Span<const int64_t> multi_index) const {
CHECK(LayoutUtil::IsDenseArray(shape()));
return primitive_util::PrimitiveTypeSwitch<std::optional<int64_t>>(
[&](auto primitive_type_constant) -> std::optional<int64_t> {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant) ||
primitive_type_constant == PRED) {
using NativeT = NativeTypeOf<primitive_type_constant>;
return Get<NativeT>(multi_index);
}
return std::nullopt;
},
shape().element_type());
}
std::optional<double> LiteralBase::GetAsDouble(
absl::Span<const int64_t> multi_index) const {
const Shape& s = shape();
CHECK(LayoutUtil::IsDenseArray(s));
return primitive_util::PrimitiveTypeSwitch<std::optional<double>>(
[&](auto primitive_type_constant) -> std::optional<double> {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = NativeTypeOf<primitive_type_constant>;
return static_cast<double>(Get<NativeT>(multi_index));
}
return std::nullopt;
},
s.element_type());
}
std::optional<double> LiteralBase::GetSumAsDouble(
absl::Span<const int64_t> linear_indices) const {
const Shape& s = shape();
CHECK(LayoutUtil::IsDenseArray(s));
if (!primitive_util::IsFloatingPointType(s.element_type())) {
return std::nullopt;
}
return primitive_util::FloatingPointTypeSwitch<double>(
[&](auto primitive_type_constant) -> double {
using NativeT = NativeTypeOf<primitive_type_constant>;
double sum = 0.0;
auto d = root_piece().data<NativeT>();
for (const int64_t idx : linear_indices) {
sum += static_cast<double>(d[idx]);
}
return sum;
},
s.element_type());
}
std::optional<complex128> LiteralBase::GetAsComplex128(
absl::Span<const int64_t> multi_index) const {
return primitive_util::PrimitiveTypeSwitch<std::optional<complex128>>(
[&](auto primitive_type_constant) -> std::optional<complex128> {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
using NativeT = NativeTypeOf<primitive_type_constant>;
if constexpr (primitive_util::IsComplexType(
primitive_type_constant)) {
return {Get<NativeT>(multi_index)};
}
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
return {{static_cast<double>(Get<NativeT>(multi_index)), 0}};
}
if constexpr (primitive_util::IsIntegralType(
primitive_type_constant) &&
primitive_type_constant != S64 &&
primitive_type_constant != U64) {
return {{static_cast<double>(Get<NativeT>(multi_index)), 0}};
}
}
return std::nullopt;
},
shape().element_type());
}
absl::Status MutableLiteralBase::SetIntegralAsS64(
absl::Span<const int64_t> multi_index, int64_t value) {
CHECK(LayoutUtil::IsDenseArray(shape()));
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant) ||
primitive_type_constant == PRED) {
using NativeT = NativeTypeOf<primitive_type_constant>;
Set<NativeT>(multi_index, static_cast<NativeT>(value));
return absl::OkStatus();
}
return FailedPrecondition("Array element type is not integral: %s",
PrimitiveType_Name(shape().element_type()));
},
shape().element_type());
}
absl::Status MutableLiteralBase::SetFromDouble(
absl::Span<const int64_t> multi_index, double value) {
CHECK(LayoutUtil::IsDenseArray(shape()));
if (!primitive_util::IsFloatingPointType(shape().element_type())) {
return FailedPrecondition("Array element type is not integral: %s",
PrimitiveType_Name(shape().element_type()));
}
primitive_util::FloatingPointTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
using NativeT = NativeTypeOf<primitive_type_constant>;
Set<NativeT>(multi_index, static_cast<NativeT>(value));
},
shape().element_type());
return absl::OkStatus();
}
namespace {
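// Printing pipeline: PrintHelper dispatches on the sub-shape kind,
// TuplePrintHelper recurses over tuple elements, and DenseArrayPrintHelper
// walks a dense array dimension by dimension emitting nested braces; the
// `oneline` flag substitutes spaces for newlines throughout.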
void PrintShape(bool print_layout, const Shape& shape, Printer* printer) {
if (print_layout) {
ShapeUtil::PrintHumanStringWithLayout(printer, shape);
} else {
ShapeUtil::PrintHumanString(printer, shape);
}
}
void PrintHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
bool print_shape, bool print_layout, bool oneline,
Printer* printer);
void TuplePrintHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
bool print_shape, bool print_layout, bool oneline,
Printer* printer) {
const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
printer->Append(oneline ? "( " : "(\n");
for (int i = 0; i < ShapeUtil::TupleElementCount(subshape); ++i) {
ShapeIndex element_index = shape_index;
element_index.push_back(i);
if (i > 0) printer->Append(oneline ? ", " : ",\n");
PrintHelper(literal, element_index, print_shape, print_layout, oneline,
printer);
}
printer->Append(oneline ? " )" : "\n)");
}
void DenseArrayPrintHelper(const LiteralBase& literal,
const ShapeIndex& shape_index, bool print_shape,
bool print_layout, bool oneline, Printer* printer) {
const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
int64_t rank = subshape.rank();
const absl::string_view linebreak = oneline ? " " : "\n";
std::function<void(absl::Span<const int64_t> dimensions,
std::vector<int64_t>*)>
print_recursive = [&](absl::Span<const int64_t> dimensions,
std::vector<int64_t>* accum_indices) {
CHECK_EQ(rank, dimensions.size() + accum_indices->size());
auto brace_to_string = [&](std::string brace) -> std::string {
if (rank == 1) {
return brace;
}
if (dimensions.size() == 1 && brace == "{") {
return StrCat(oneline ? "" : " ", brace,
dimensions[0] <= 1 ? "" : " ");
}
if (dimensions.size() == 1 && brace == "}") {
return StrCat(dimensions[0] <= 1 ? "" : " ", brace);
}
if (brace == "{") {
const int64_t accum_indices_size = accum_indices->size();
if (rank > 3 && !accum_indices->empty() &&
accum_indices_size < rank) {
              // Only break the line after a non-innermost opening brace when
              // the dimension actually contains elements.
              int size = dimensions.front();
              return StrCat(brace, " ",
                            size > 0 ? linebreak : "");
}
return StrCat(brace, linebreak);
}
return StrCat(linebreak, brace);
};
if (dimensions.empty()) {
std::string elem;
if (subshape.element_type() == PRED && rank > 0) {
elem = literal.Get<bool>(*accum_indices, shape_index) ? "1" : "0";
} else {
elem = literal.GetAsString(*accum_indices, shape_index);
}
printer->Append(elem);
} else {
printer->Append(brace_to_string("{"));
for (int i = 0; i < dimensions[0]; ++i) {
accum_indices->push_back(i);
print_recursive(dimensions.subspan(1), accum_indices);
accum_indices->pop_back();
if (i < dimensions[0] - 1) {
printer->Append(",");
printer->Append(dimensions.size() > 1 ? linebreak : " ");
}
}
printer->Append(brace_to_string("}"));
}
};
if (print_shape) {
PrintShape(print_layout, subshape, printer);
if (subshape.is_dynamic()) {
printer->Append("(");
for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
printer->Append(literal.GetDynamicSize(i, shape_index));
if (i < subshape.dimensions_size() - 1) {
printer->Append(",");
}
}
printer->Append(")");
}
printer->Append(" ");
}
std::vector<int64_t> indices = {};
std::vector<int64_t> dimensions;
dimensions.reserve(subshape.rank());
for (int64_t i = 0; i < subshape.rank(); ++i) {
dimensions.push_back(literal.GetDynamicSize(i, shape_index));
}
print_recursive(dimensions, &indices);
}
void PrintHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
bool print_shape, bool print_layout, bool oneline,
Printer* printer) {
const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
CHECK(LayoutUtil::HasLayout(literal.shape()));
CHECK(LayoutUtil::HasLayout(subshape));
if (subshape.IsTuple()) {
TuplePrintHelper(literal, shape_index, print_shape, print_layout, oneline,
printer);
} else if (subshape.IsToken()) {
printer->Append("token");
} else {
CHECK(LayoutUtil::IsDenseArray(subshape));
if (literal.IsKnown(shape_index)) {
DenseArrayPrintHelper(literal, shape_index, print_shape, print_layout,
oneline, printer);
} else {
PrintShape(print_layout, subshape, printer);
printer->Append(" ");
if (literal.IsDetermined(shape_index)) {
printer->Append("unknown");
} else {
printer->Append("undetermined");
}
}
}
}
}
void LiteralBase::Print(Printer* printer) const {
  CHECK(LayoutUtil::HasLayout(this->shape()));
  PrintHelper(*this, {}, /*print_shape=*/true, /*print_layout=*/false,
              /*oneline=*/false, printer);
}
void LiteralBase::PrintOneline(Printer* printer) const {
  CHECK(LayoutUtil::HasLayout(this->shape()));
  PrintHelper(*this, {}, /*print_shape=*/true, /*print_layout=*/false,
              /*oneline=*/true, printer);
}
void LiteralBase::PrintWithoutShape(Printer* printer) const {
  CHECK(LayoutUtil::HasLayout(this->shape()));
  PrintHelper(*this, {}, /*print_shape=*/false, /*print_layout=*/false,
              /*oneline=*/false, printer);
}
void LiteralBase::PrintWithoutShapeOneline(Printer* printer) const {
  CHECK(LayoutUtil::HasLayout(this->shape()));
  PrintHelper(*this, {}, /*print_shape=*/false, /*print_layout=*/false,
              /*oneline=*/true, printer);
}
void LiteralBase::PrintWithLayout(Printer* printer) const {
  CHECK(LayoutUtil::HasLayout(this->shape()));
  PrintHelper(*this, {}, /*print_shape=*/true, /*print_layout=*/true,
              /*oneline=*/false, printer);
}
void LiteralBase::PrintWithLayoutOneline(Printer* printer) const {
  CHECK(LayoutUtil::HasLayout(this->shape()));
  PrintHelper(*this, {}, /*print_shape=*/true, /*print_layout=*/true,
              /*oneline=*/true, printer);
}
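// The ToString* variants below are thin wrappers that run the matching
// Print* overload into a StringPrinter. Illustrative output (format per the
// printer above):
//   LiteralUtil::CreateR1<int32_t>({0, 1, 2}).ToString()
//   // => "s32[3] {0, 1, 2}"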
std::string LiteralBase::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringOneline() const {
StringPrinter printer;
PrintOneline(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithoutShape() const {
StringPrinter printer;
PrintWithoutShape(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithoutShapeOneline() const {
StringPrinter printer;
PrintWithoutShapeOneline(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithLayout() const {
StringPrinter printer;
PrintWithLayout(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithLayoutOneline() const {
StringPrinter printer;
PrintWithLayoutOneline(&printer);
return std::move(printer).ToString();
}
void LiteralBase::EachCellAsString(
absl::FunctionRef<void(absl::Span<const int64_t> indices,
const std::string& value)>
per_cell) const {
if (ShapeUtil::IsZeroElementArray(shape())) {
return;
}
  auto indices = IndexUtil::LinearIndexToMultidimensionalIndex(
      shape(), /*linear_index=*/0);
do {
per_cell(indices, GetAsString(indices));
} while (IndexUtil::BumpIndices(shape(), absl::MakeSpan(indices)));
}
namespace {
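// Conversion machinery: ConvertBetweenNativeTypes performs the per-element
// casts, ConvertIfDestTypeMatches instantiates it for one fixed source type
// against every array destination type, and ConvertSwitch selects the
// source-type instantiation at runtime.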
template <typename NativeSrcT, typename NativeDestT>
void ConvertBetweenNativeTypes(absl::Span<const NativeSrcT> src_data,
void* dst_base) {
static_assert(!std::is_same_v<NativeSrcT, NativeDestT>);
  auto converter = [](NativeSrcT src) -> NativeDestT {
    // Saturating float-to-integer conversion: NaN maps to zero and values
    // outside the destination range clamp to its extrema, avoiding UB.
    if constexpr (!std::is_same_v<NativeDestT, bool> &&
                  !std::numeric_limits<NativeSrcT>::is_integer &&
                  std::numeric_limits<NativeDestT>::is_integer) {
      if (src != src) {  // NaN compares unequal to itself.
        return NativeDestT{0};
      }
      if (src >=
          static_cast<NativeSrcT>(std::numeric_limits<NativeDestT>::max())) {
        return std::numeric_limits<NativeDestT>::max();
      }
      if (src <=
          static_cast<NativeSrcT>(std::numeric_limits<NativeDestT>::lowest())) {
        return std::numeric_limits<NativeDestT>::lowest();
      }
    }
    // 1-byte sources are widened to half before the final cast; a direct
    // cast to f8e3m4 is deliberately avoided here.
    if constexpr (sizeof(src) == 1 &&
                  std::is_same_v<NativeDestT, tsl::float8_e3m4>) {
      return static_cast<NativeDestT>(static_cast<half>(src));
    } else {
      return static_cast<NativeDestT>(src);
    }
  };
NativeDestT* dest_data = static_cast<NativeDestT*>(dst_base);
for (const NativeSrcT& src : src_data) {
*(dest_data++) = converter(src);
}
}
template <PrimitiveType kSrcType>
absl::Status ConvertIfDestTypeMatches(const LiteralBase& src_literal,
MutableLiteralBase& dst_literal) {
DCHECK(dst_literal.shape().IsArray());
using NativeSrcT = NativeTypeOf<kSrcType>;
auto src_data = src_literal.data<NativeSrcT>();
void* dst_base = dst_literal.untyped_data();
DCHECK_EQ(src_data.size(), dst_literal.element_count());
return primitive_util::ArrayTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsComplexType(kSrcType) &&
!primitive_util::IsComplexType(primitive_type_constant)) {
return Unimplemented("%s from type %s to type %s is not implemented.",
"Converting", PrimitiveType_Name(kSrcType),
PrimitiveType_Name(primitive_type_constant()));
} else if constexpr (kSrcType != primitive_type_constant) {
using NativeDestT = NativeTypeOf<primitive_type_constant>;
ConvertBetweenNativeTypes<NativeSrcT, NativeDestT>(src_data,
dst_base);
}
return absl::OkStatus();
},
dst_literal.shape().element_type());
}
absl::StatusOr<Literal> ConvertSwitch(const LiteralBase& literal,
PrimitiveType primitive_dest_type) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(literal.shape()));
if (literal.shape().element_type() == primitive_dest_type) {
return literal.Clone();
}
if (!primitive_util::IsArrayType(primitive_dest_type) ||
!primitive_util::IsArrayType(literal.shape().element_type())) {
return Unimplemented("%s from type %s to type %s is not implemented.",
"Converting",
PrimitiveType_Name(literal.shape().element_type()),
PrimitiveType_Name(primitive_dest_type));
}
Literal result(
ShapeUtil::ChangeElementType(literal.shape(), primitive_dest_type));
TF_RETURN_IF_ERROR(primitive_util::ArrayTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
return ConvertIfDestTypeMatches<primitive_type_constant>(literal,
result);
},
literal.shape().element_type()));
return result;
}
}
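// Example (illustrative): converting the element type while keeping the
// shape:
//   Literal f = LiteralUtil::CreateR1<float>({1.5f, 2.5f});
//   Literal h = f.Convert(F16).value();  // f16[2] literal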
absl::StatusOr<Literal> LiteralBase::Convert(
PrimitiveType primitive_dest_type) const {
return ConvertSwitch(*this, primitive_dest_type);
}
absl::StatusOr<Literal> LiteralBase::BitcastConvert(
const Shape& dest_shape) const {
if (ShapeUtil::ByteSizeOf(dest_shape) != ShapeUtil::ByteSizeOf(shape())) {
return InvalidArgument(
"Can not bitcast-convert from shape %s to a shape of different size %s",
shape().ToString(), dest_shape.ToString());
}
if (dest_shape.IsTuple() || shape().IsTuple()) {
return InvalidArgument(
"bitcast-convert is not valid for tuple shapes %s->%s",
shape().ToString(), dest_shape.ToString());
}
if (shape().is_dynamic() || dest_shape.is_dynamic()) {
return InvalidArgument(
"bitcast-convert is not valid for dynamic shape %s->%s",
shape().ToString(), dest_shape.ToString());
}
Literal out(dest_shape);
std::memcpy(out.root_piece_.buffer(), root_piece().buffer(),
root_piece().size_bytes_dense());
if constexpr (!kLittleEndian) {
size_t input_elem_size =
ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
TF_RETURN_IF_ERROR(tsl::ByteSwapArray(
const_cast<char*>(out.root_piece().buffer()), input_elem_size,
out.root_piece().size_bytes_dense() / input_elem_size));
size_t output_elem_size =
ShapeUtil::ByteSizeOfPrimitiveType(dest_shape.element_type());
TF_RETURN_IF_ERROR(tsl::ByteSwapArray(
const_cast<char*>(out.root_piece().buffer()), output_elem_size,
out.root_piece().size_bytes_dense() / output_elem_size));
}
return out;
}
absl::StatusOr<Literal> LiteralBase::ConvertToShape(
const Shape& dest_shape) const {
if (!dest_shape.IsTuple()) {
return Convert(dest_shape.element_type());
}
std::vector<Literal> elements;
const auto tuple_element_count = ShapeUtil::TupleElementCount(shape());
elements.reserve(tuple_element_count);
for (int i = 0; i < tuple_element_count; ++i) {
auto element = LiteralSlice(*this, {i});
TF_ASSIGN_OR_RETURN(
auto new_element,
element.ConvertToShape(ShapeUtil::GetSubshape(dest_shape, {i})));
elements.push_back(std::move(new_element));
}
return MutableLiteralBase::MoveIntoTuple(absl::MakeSpan(elements));
}
Literal MutableLiteralBase::MoveIntoTuple(
absl::Span<Literal> elements) {
std::vector<const Shape*> element_shapes;
element_shapes.reserve(elements.size());
for (const Literal& element : elements) {
element_shapes.push_back(&element.shape());
}
  Literal literal(ShapeUtil::MakeTupleShapeWithPtrs(element_shapes),
                  /*allocate_arrays=*/false);
  for (int i = 0, end = elements.size(); i < end; ++i) {
    TF_CHECK_OK(
        literal.MoveFrom(std::move(elements[i]), /*dest_shape_index=*/{i}));
  }
return literal;
}
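// Equality machinery: EqualElements takes a memcmp fast path for static
// dense arrays (masking the padding bits of sub-byte types) and otherwise
// recurses element by element, honoring dynamic dimension sizes.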
template <typename NativeT>
bool LiteralBase::Piece::EqualElementsInternal(
const LiteralBase::Piece& other, std::vector<int64_t>* multi_index) const {
if (multi_index->size() == subshape().rank()) {
return (Get<NativeT>(*multi_index) == other.Get<NativeT>(*multi_index));
}
for (int64_t i = 0; i < GetDynamicSize(multi_index->size()); ++i) {
multi_index->push_back(i);
if (!EqualElementsInternal<NativeT>(other, multi_index)) {
return false;
}
multi_index->pop_back();
}
return true;
}
bool LiteralBase::Piece::EqualDynamicSize(
const LiteralBase::Piece& other) const {
DCHECK(ShapeUtil::Compatible(subshape(), other.subshape()));
if (subshape().is_static()) {
return true;
}
for (int64_t i = 0; i < subshape().rank(); ++i) {
if (GetDynamicSize(i) != other.GetDynamicSize(i)) {
return false;
}
}
return true;
}
bool LiteralBase::Piece::EqualElements(const LiteralBase::Piece& other) const {
if (subshape().is_static() &&
ShapeUtil::Equal(subshape(), other.subshape()) && subshape().IsArray()) {
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK_EQ(size_bytes_dense(), other.size_bytes_dense());
if (primitive_util::IsSubByteNonPredType(subshape().element_type())) {
CHECK(!primitive_util::IsFloatingPointType(subshape().element_type()));
auto one_array = buffer();
auto two_array = other.buffer();
const int bits_per_element =
primitive_util::BitWidth(subshape().element_type());
const uint8_t mask = LsbMask<uint8_t>(bits_per_element);
for (int64_t i = 0; i < size_bytes_dense(); ++i) {
if ((one_array[i] & mask) != (two_array[i] & mask)) return false;
}
return true;
}
return memcmp(buffer(), other.buffer(), size_bytes_dense()) == 0;
}
std::vector<int64_t> multi_index;
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeSrcT = NativeTypeOf<primitive_type_constant>;
return EqualElementsInternal<NativeSrcT>(other, &multi_index);
},
subshape().element_type());
}
bool LiteralBase::Equal(const LiteralBase& other, bool layout_sensitive) const {
if (!ShapeUtil::EqualStructure(shape(), other.shape())) {
return false;
}
return root_piece().ForEachSubpieceWithBool([&](const ShapeIndex& index,
const Piece& piece) {
const Piece& other_piece = other.piece(index);
const Shape& subshape = piece.subshape();
const Shape& other_subshape = other_piece.subshape();
if (subshape.element_type() != other_subshape.element_type()) {
return false;
}
if (!piece.subshape().IsArray()) {
return true;
}
if (subshape.rank() != other_subshape.rank()) {
return false;
}
if (layout_sensitive && (subshape.layout() != other_subshape.layout())) {
return false;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
if (piece.GetDynamicSize(i) != other_piece.GetDynamicSize(i)) {
return false;
}
}
if (!piece.EqualElements(other_piece)) {
return false;
}
return true;
});
}
template <typename NativeT>
static bool EqualIncludingNan(NativeT a, NativeT b) {
if constexpr (std::numeric_limits<NativeT>::has_quiet_NaN ||
std::numeric_limits<NativeT>::has_signaling_NaN) {
if (Eigen::numext::isnan(a) && Eigen::numext::isnan(b)) {
return true;
}
}
return a == b;
}
template <typename T>
static bool EqualIncludingNan(std::complex<T> a, std::complex<T> b) {
return EqualIncludingNan(a.real(), b.real()) &&
EqualIncludingNan(a.imag(), b.imag());
}
template <typename NativeT>
static bool AllElementsEqualValue(absl::Span<const NativeT> data,
NativeT value) {
for (int64_t i = 0; i < data.size(); ++i) {
if (memcmp(&data[i], &value, sizeof value)) {
return false;
}
}
return true;
}
bool Literal::Piece::IsAll(const Literal& scalar) const {
CHECK(ShapeUtil::IsScalar(scalar.shape())) << scalar.shape().ToString();
if (!subshape().IsArray()) {
return false;
}
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK_EQ(subshape().element_type(), scalar.shape().element_type());
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
return AllElementsEqualValue(this->data<NativeT>(),
scalar.GetFirstElement<NativeT>());
},
subshape().element_type());
}
int64_t Literal::Piece::CountAll(const Literal& scalar) const {
CHECK(ShapeUtil::IsScalar(scalar.shape())) << scalar.shape().ToString();
if (!subshape().IsArray()) {
return 0;
}
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK_EQ(subshape().element_type(), scalar.shape().element_type());
return primitive_util::ArrayTypeSwitch<int64_t>(
[&](auto primitive_type_constant) -> int64_t {
using NativeT = NativeTypeOf<primitive_type_constant>;
return absl::c_count_if(
this->data<NativeT>(), [&](NativeT elem) -> bool {
return EqualIncludingNan(elem, scalar.GetFirstElement<NativeT>());
});
},
subshape().element_type());
}
bool LiteralBase::IsAll(const Literal& scalar) const {
return root_piece().IsAll(scalar);
}
bool LiteralBase::IsAll(int8_t value) const {
if (!shape().IsArray()) {
return false;
}
PrimitiveType ty = shape().element_type();
if (primitive_util::IsFloatingPointType(ty)) {
return IsAllFloatImpl(value, false);
}
if (primitive_util::IsUnsignedIntegralType(ty) && value < 0) {
return false;
}
Literal scalar(ShapeUtil::MakeScalarShape(ty));
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
NativeT converted(value);
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
if (!Eigen::numext::isfinite(converted)) {
return false;
}
}
if constexpr (!primitive_util::IsComplexType(primitive_type_constant)) {
if (static_cast<int8_t>(converted) != value) {
return false;
}
}
scalar.Set<NativeT>({}, converted);
return root_piece().IsAll(scalar);
},
ty);
}
bool LiteralBase::IsAllFloat(float value) const {
return IsAllFloatImpl(value, true);
}
bool LiteralBase::IsAllFloatImpl(float value, bool round_value) const {
PrimitiveType ty = shape().element_type();
if (!primitive_util::IsFloatingPointType(ty)) {
return false;
}
Literal scalar(ShapeUtil::MakeScalarShape(ty));
return primitive_util::FloatingPointTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
scalar.Set<NativeT>({}, static_cast<NativeT>(value));
if (!round_value && scalar.GetAsDouble({}) != value) {
return false;
}
return root_piece().IsAll(scalar);
},
ty);
}
bool LiteralBase::IsAllComplex(complex64 value) const {
PrimitiveType ty = shape().element_type();
if (!primitive_util::IsComplexType(ty)) {
return false;
}
Literal scalar(ShapeUtil::MakeScalarShape(ty));
return primitive_util::ComplexTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
scalar.Set<NativeT>({}, static_cast<NativeT>(value));
return root_piece().IsAll(scalar);
},
ty);
}
bool LiteralBase::IsAllFirst() const {
if (!shape().IsArray()) {
return false;
}
if (ShapeUtil::IsZeroElementArray(shape())) {
return false;
}
absl::InlinedVector<int64_t, 4> start_indices(shape().rank(), 0);
absl::InlinedVector<int64_t, 4> end_indices(shape().rank(), 1);
Literal first = Slice(start_indices, end_indices);
return IsAll(first.Reshape({}).value());
}
bool LiteralBase::IsR1Iota() const {
if (!shape().IsArray()) {
return false;
}
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
if (shape().rank() != 1) {
return false;
}
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
const int64_t elements = ShapeUtil::ElementsIn(shape());
for (int64_t idx = 0; idx < elements; ++idx) {
if constexpr (primitive_util::IsIntegralType(
primitive_type_constant)) {
if (static_cast<int64_t>(Get<NativeT>({idx})) != idx) {
return false;
}
} else if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
if (Get<NativeT>({idx}) != static_cast<NativeT>(idx)) {
return false;
}
} else if constexpr (primitive_util::IsComplexType(
primitive_type_constant)) {
if (Get<NativeT>({idx}) != NativeT(idx, 0.0f)) {
return false;
}
} else {
return false;
}
}
return true;
},
shape().element_type());
}
std::optional<int64_t> LiteralBase::IsR1StridedIota() const {
if (!shape().IsArray() || shape().rank() != 1) {
return std::nullopt;
}
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
const int64_t elements = ShapeUtil::ElementsIn(shape());
const PrimitiveType type = shape().element_type();
if (elements <= 1 || !primitive_util::IsIntegralType(type)) {
return std::nullopt;
}
return primitive_util::IntegralTypeSwitch<std::optional<int64_t>>(
[&](auto primitive_type_constant) -> std::optional<int64_t> {
using NativeT = NativeTypeOf<primitive_type_constant>;
const int64_t stride = static_cast<int64_t>(Get<NativeT>({1}));
if (stride == 0) {
return std::nullopt;
}
for (int64_t idx = 0; idx < elements; ++idx) {
if (static_cast<int64_t>(Get<NativeT>({idx})) != idx * stride) {
return std::nullopt;
}
}
return stride;
},
shape().element_type());
}
bool LiteralBase::IsZero(absl::Span<const int64_t> indices) const {
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
return Get<NativeT>(indices) == NativeT{0};
},
shape().element_type());
}
namespace {
template <typename RepeatedFieldT, typename NativeT>
void CopyToRepeatedField(RepeatedFieldT* dest,
const absl::Span<const NativeT> src) {
*dest = RepeatedFieldT(src.begin(), src.end());
}
}
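// Proto round-tripping: 32- and 64-bit element types use the proto's
// repeated fields; 8/16-bit and sub-byte types are serialized as raw byte
// strings (byte-swapped on big-endian hosts); complex values are stored as
// interleaved (real, imag) pairs.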
void LiteralBase::Piece::set_array_value_state(ArrayValueState state) {
array_value_state_ = state;
}
LiteralBase::ArrayValueState LiteralBase::Piece::get_array_value_state() const {
return array_value_state_;
}
void LiteralBase::Piece::WriteToProto(LiteralProto* proto) const {
*proto->mutable_shape() = subshape().ToProto();
switch (subshape().element_type()) {
case PRED:
CopyToRepeatedField(proto->mutable_preds(), data<bool>());
break;
case U2:
*proto->mutable_u2s() = std::string(
reinterpret_cast<const char*>(data<u2>().data()), size_bytes_dense());
break;
case U4:
*proto->mutable_u4s() = std::string(
reinterpret_cast<const char*>(data<u4>().data()), size_bytes_dense());
break;
case U8:
proto->set_u8s(static_cast<const unsigned char*>(data<uint8_t>().data()),
element_count());
break;
case U16:
*proto->mutable_u16s() =
std::string(reinterpret_cast<const char*>(data<uint16_t>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_u16s());
}
break;
case U32:
CopyToRepeatedField(proto->mutable_u32s(), data<uint32_t>());
break;
case U64:
CopyToRepeatedField(proto->mutable_u64s(), data<uint64_t>());
break;
case S2:
*proto->mutable_s2s() = std::string(
reinterpret_cast<const char*>(data<s2>().data()), size_bytes_dense());
break;
case S4:
*proto->mutable_s4s() = std::string(
reinterpret_cast<const char*>(data<s4>().data()), size_bytes_dense());
break;
case S8:
proto->set_s8s(static_cast<const signed char*>(data<int8_t>().data()),
element_count());
break;
case S16:
*proto->mutable_s16s() =
std::string(reinterpret_cast<const char*>(data<int16_t>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_s16s());
}
break;
case S32:
CopyToRepeatedField(proto->mutable_s32s(), data<int32_t>());
break;
case S64:
CopyToRepeatedField(proto->mutable_s64s(), data<int64_t>());
break;
case F8E5M2:
*proto->mutable_f8e5m2s() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e5m2>().data()),
size_bytes_dense());
break;
case F8E4M3:
*proto->mutable_f8e4m3s() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3>().data()),
size_bytes_dense());
break;
case F8E4M3FN:
*proto->mutable_f8e4m3fns() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3fn>().data()),
size_bytes_dense());
break;
case F8E4M3B11FNUZ:
*proto->mutable_f8e4m3b11fnuzs() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3b11fnuz>().data()),
size_bytes_dense());
break;
case F8E5M2FNUZ:
*proto->mutable_f8e5m2fnuzs() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e5m2fnuz>().data()),
size_bytes_dense());
break;
case F8E4M3FNUZ:
*proto->mutable_f8e4m3fnuzs() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3fnuz>().data()),
size_bytes_dense());
break;
case F8E3M4:
*proto->mutable_f8e3m4s() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e3m4>().data()),
size_bytes_dense());
break;
case F16:
*proto->mutable_f16s() =
std::string(reinterpret_cast<const char*>(data<half>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_f16s());
}
break;
case BF16:
*proto->mutable_bf16s() =
std::string(reinterpret_cast<const char*>(data<bfloat16>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_bf16s());
}
break;
case F32:
CopyToRepeatedField(proto->mutable_f32s(), data<float>());
break;
case F64:
CopyToRepeatedField(proto->mutable_f64s(), data<double>());
break;
case C64:
for (complex64 value : data<complex64>()) {
proto->add_c64s(value.real());
proto->add_c64s(value.imag());
}
break;
case C128:
for (complex128 value : data<complex128>()) {
proto->add_c128s(value.real());
proto->add_c128s(value.imag());
}
break;
case TUPLE:
case TOKEN:
return;
default:
LOG(FATAL) << "Unhandled primitive type "
<< PrimitiveType_Name(subshape().element_type());
}
}
const void* LiteralBase::Piece::untyped_data() const {
DCHECK(LayoutUtil::IsDenseArray(subshape()))
<< ShapeUtil::HumanString(subshape());
return buffer();
}
void* LiteralBase::Piece::untyped_data() {
DCHECK(LayoutUtil::IsDenseArray(subshape()))
<< ShapeUtil::HumanString(subshape());
return buffer();
}
namespace {
template <typename RepeatedFieldT, typename NativeT>
absl::Status CopyFromRepeatedField(absl::Span<NativeT> dest,
const RepeatedFieldT& src) {
if (dest.size() != src.size()) {
return InvalidArgument(
"Expected %lu elements in LiteralProto repeated field, has %d",
dest.size(), src.size());
}
std::copy(src.begin(), src.end(), dest.begin());
return absl::OkStatus();
}
}
absl::Status LiteralBase::Piece::CopyFromProto(const LiteralProto& proto) {
TF_RET_CHECK(proto.has_shape());
Shape shape(proto.shape());
TF_RET_CHECK(LayoutUtil::HasLayout(shape));
TF_RET_CHECK(ShapeUtil::Equal(shape, subshape()));
switch (subshape().element_type()) {
case PRED:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<bool>(), proto.preds()));
break;
case S2: {
const std::string& s(proto.s2s());
TF_RET_CHECK(data<s2>().size() * sizeof(s2) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case S4: {
const std::string& s(proto.s4s());
TF_RET_CHECK(data<s4>().size() * sizeof(s4) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case S8: {
auto s8_data = data<int8_t>();
TF_RET_CHECK(proto.s8s().size() == s8_data.size());
std::copy(proto.s8s().begin(), proto.s8s().end(), s8_data.begin());
break;
}
case S16: {
const std::string& s(proto.s16s());
TF_RET_CHECK(data<int16_t>().size() * sizeof(int16_t) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case S32:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int32_t>(), proto.s32s()));
break;
case S64:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int64_t>(), proto.s64s()));
break;
case U2: {
const std::string& s(proto.u2s());
TF_RET_CHECK(data<u2>().size() * sizeof(u2) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case U4: {
const std::string& s(proto.u4s());
TF_RET_CHECK(data<u4>().size() * sizeof(u4) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case U8: {
auto u8_data = data<uint8_t>();
TF_RET_CHECK(proto.u8s().size() == u8_data.size());
std::copy(proto.u8s().begin(), proto.u8s().end(), u8_data.begin());
break;
}
case U16: {
const std::string& s(proto.u16s());
TF_RET_CHECK(data<uint16_t>().size() * sizeof(uint16_t) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case U32:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint32_t>(), proto.u32s()));
break;
case U64:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint64_t>(), proto.u64s()));
break;
case F8E5M2: {
const std::string& s(proto.f8e5m2s());
TF_RET_CHECK(data<tsl::float8_e5m2>().size() * sizeof(tsl::float8_e5m2) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3: {
const std::string& s(proto.f8e4m3s());
TF_RET_CHECK(data<tsl::float8_e4m3>().size() * sizeof(tsl::float8_e4m3) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3FN: {
const std::string& s(proto.f8e4m3fns());
TF_RET_CHECK(data<tsl::float8_e4m3fn>().size() *
sizeof(tsl::float8_e4m3fn) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3B11FNUZ: {
const std::string& s(proto.f8e4m3b11fnuzs());
TF_RET_CHECK(data<tsl::float8_e4m3b11fnuz>().size() *
sizeof(tsl::float8_e4m3b11fnuz) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E5M2FNUZ: {
const std::string& s(proto.f8e5m2fnuzs());
TF_RET_CHECK(data<tsl::float8_e5m2fnuz>().size() *
sizeof(tsl::float8_e5m2fnuz) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3FNUZ: {
const std::string& s(proto.f8e4m3fnuzs());
TF_RET_CHECK(data<tsl::float8_e4m3fnuz>().size() *
sizeof(tsl::float8_e4m3fnuz) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E3M4: {
const std::string& s(proto.f8e3m4s());
TF_RET_CHECK(data<tsl::float8_e3m4>().size() * sizeof(tsl::float8_e3m4) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F16: {
const std::string& s(proto.f16s());
TF_RET_CHECK(data<half>().size() * sizeof(half) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case BF16: {
const std::string& s(proto.bf16s());
TF_RET_CHECK(data<bfloat16>().size() * sizeof(bfloat16) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case F32:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<float>(), proto.f32s()));
break;
case F64:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<double>(), proto.f64s()));
break;
case C64: {
auto complex_data = data<complex64>();
TF_RET_CHECK(proto.c64s_size() == complex_data.size() * 2);
for (int64_t i = 0; i < complex_data.size(); ++i) {
complex_data[i] = complex64{proto.c64s(i * 2), proto.c64s(i * 2 + 1)};
}
break;
}
case C128: {
auto complex_data = data<complex128>();
const int64_t complex_data_size_doubled = complex_data.size() * 2;
TF_RET_CHECK(proto.c128s_size() == complex_data_size_doubled);
for (int64_t i = 0, end = complex_data.size(); i < end; ++i) {
complex_data[i] =
complex128{proto.c128s(i * 2), proto.c128s(i * 2 + 1)};
}
break;
}
case TUPLE:
return InvalidArgument("Should not be called on tuple shapes: %s",
ShapeUtil::HumanString(subshape()));
default:
return InvalidArgument("Is called on unsupported shape: %s",
ShapeUtil::HumanString(subshape()));
}
return absl::OkStatus();
}
bool LiteralBase::Piece::IsKnown() const {
if (array_value_state_ != ArrayValueState::kKnown) {
return false;
}
if (subshape().IsTuple()) {
bool are_all_leaf_arrays_known = true;
ForEachSubpiece([&are_all_leaf_arrays_known](const ShapeIndex& index,
const Piece& piece) {
if (!piece.subshape().IsArray()) {
return;
}
are_all_leaf_arrays_known &= piece.IsKnown();
});
return are_all_leaf_arrays_known;
}
return true;
}
bool LiteralBase::Piece::IsDetermined() const {
if (array_value_state_ == ArrayValueState::kUndetermined) {
return false;
}
if (subshape().IsTuple()) {
bool are_all_leaf_arrays_determined = true;
ForEachSubpiece([&are_all_leaf_arrays_determined](const ShapeIndex& index,
const Piece& piece) {
if (!piece.subshape().IsArray()) {
return;
}
are_all_leaf_arrays_determined &= piece.IsDetermined();
});
return are_all_leaf_arrays_determined;
}
return true;
}
LiteralProto LiteralBase::ToProto() const {
LiteralProto proto;
root_piece().ForEachSubpiece(
[&](const ShapeIndex& index, const Piece& piece) {
LiteralProto* proto_piece = &proto;
for (int64_t i : index) {
while (proto_piece->tuple_literals_size() <= i) {
proto_piece->add_tuple_literals();
}
proto_piece = proto_piece->mutable_tuple_literals(i);
}
piece.WriteToProto(proto_piece);
});
return proto;
}
const void* LiteralBase::untyped_data(const ShapeIndex& shape_index) const {
return piece(shape_index).untyped_data();
}
void* MutableLiteralBase::untyped_data(const ShapeIndex& shape_index) {
return piece(shape_index).untyped_data();
}
int64_t LiteralBase::size_bytes(const ShapeIndex& shape_index) const {
return piece(shape_index).size_bytes_dense();
}
std::string LiteralBase::GetR1U8AsString() const {
CHECK(shape().IsArray());
CHECK_EQ(shape().rank(), 1);
CHECK_EQ(shape().element_type(), U8);
return std::string(absl::bit_cast<const char*>(data<uint8_t>().data()),
ShapeUtil::ElementsIn(shape()));
}
void MutableBorrowingLiteral::CopyPieceSubtree(const Shape& shape,
const Piece* src_piece,
Piece* dest_piece) {
DCHECK(ShapeUtil::Equal(src_piece->subshape(), dest_piece->subshape()))
<< "src_piece has shape: "
<< ShapeUtil::HumanString(src_piece->subshape())
<< "dest_piece has shape: "
<< ShapeUtil::HumanString(dest_piece->subshape());
dest_piece->set_array_value_state(src_piece->get_array_value_state());
if (shape.IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
Piece child_piece;
child_piece.set_subshape(&subshape);
CopyPieceSubtree(subshape, &src_piece->child(i), &child_piece);
dest_piece->emplace_back(std::move(child_piece));
}
} else if (shape.IsArray()) {
dest_piece->set_buffer(const_cast<char*>(src_piece->buffer()));
}
}
MutableLiteralBase::~MutableLiteralBase() = default;
MutableBorrowingLiteral::MutableBorrowingLiteral(
const MutableBorrowingLiteral& literal)
: MutableLiteralBase() {
shape_ = literal.shape_.Clone();
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal.root_piece(), root_piece_);
}
MutableBorrowingLiteral& MutableBorrowingLiteral::operator=(
const MutableBorrowingLiteral& literal) {
shape_ = literal.shape_.Clone();
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal.root_piece(), root_piece_);
return *this;
}
MutableBorrowingLiteral::MutableBorrowingLiteral(MutableLiteralBase* literal)
: MutableLiteralBase() {
shape_ = literal->shape_.Clone();
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal->root_piece(), root_piece_);
}
MutableBorrowingLiteral::MutableBorrowingLiteral(
MutableBorrowingLiteral literal, const ShapeIndex& view_root)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(literal.piece(view_root).subshape());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal.piece(view_root), root_piece_);
}
MutableBorrowingLiteral::MutableBorrowingLiteral(const char* src_buf_ptr,
const Shape& shape)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(shape);
CHECK(LayoutUtil::HasLayout(*shape_));
CHECK(!shape_->IsTuple());
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
root_piece_->set_buffer(const_cast<char*>(src_buf_ptr));
}
MutableBorrowingLiteral::MutableBorrowingLiteral(absl::Span<char*> src_buf_ptrs,
const Shape& shape)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(shape);
if (!shape_->IsTuple()) {
CHECK_EQ(src_buf_ptrs.size(), 1);
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
root_piece_->set_buffer(const_cast<char*>(src_buf_ptrs[0]));
} else {
CHECK(!ShapeUtil::IsNestedTuple(*shape_));
CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
for (int i = 0; i < src_buf_ptrs.size(); ++i) {
Piece child_piece;
const auto& src_shape = shape_->tuple_shapes(i);
CHECK(src_shape.IsArray());
child_piece.set_subshape(&src_shape);
child_piece.set_buffer(src_buf_ptrs[i]);
root_piece_->emplace_back(std::move(child_piece));
}
}
}
MutableBorrowingLiteral::MutableBorrowingLiteral(ShapeTree<char*> src_buf_ptrs)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(src_buf_ptrs.shape());
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
BuildPieceSubtree(*shape_, root_piece_);
root_piece_->ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
if (ShapeUtil::GetSubshape(*shape_, index).IsTuple()) {
DCHECK_EQ(src_buf_ptrs.element(index), nullptr)
<< "Tuples should not have buffer pointers";
return;
}
piece->set_buffer(const_cast<char*>(src_buf_ptrs.element(index)));
});
}
MutableBorrowingLiteral::~MutableBorrowingLiteral() {
if (root_piece_ != nullptr) {
delete root_piece_;
}
}
LiteralSlice::LiteralSlice(const LiteralBase& literal)
: LiteralBase(), root_piece_(&literal.root_piece()) {}
LiteralSlice::LiteralSlice(const LiteralBase& literal,
const ShapeIndex& view_root)
: LiteralBase(), root_piece_(&literal.piece(view_root)) {}
BorrowingLiteral::BorrowingLiteral(const char* src_buf_ptr, const Shape& shape)
: LiteralBase(), shape_(std::make_unique<Shape>(shape)) {
CHECK(shape_->IsArray());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = Piece();
root_piece_.set_subshape(shape_.get());
root_piece_.set_buffer(const_cast<char*>(src_buf_ptr));
}
BorrowingLiteral::BorrowingLiteral(absl::Span<const char* const> src_buf_ptrs,
const Shape& shape)
: LiteralBase(), shape_(std::make_unique<Shape>(shape)) {
CHECK(shape_->IsTuple());
CHECK(!ShapeUtil::IsNestedTuple(*shape_));
CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));
root_piece_ = Piece();
root_piece_.set_subshape(shape_.get());
BuildPieceSubtree(*shape_, &root_piece_);
for (int i = 0, end = src_buf_ptrs.size(); i < end; ++i) {
const auto& src_shape = shape_->tuple_shapes(i);
CHECK(src_shape.IsArray());
root_piece_.child(i).set_buffer(const_cast<char*>(src_buf_ptrs[i]));
}
}
BorrowingLiteral::BorrowingLiteral(ShapeTree<const char*> src_buf_ptrs)
: LiteralBase(), shape_(std::make_unique<Shape>(src_buf_ptrs.shape())) {
root_piece_ = Piece();
root_piece_.set_subshape(shape_.get());
BuildPieceSubtree(*shape_, &root_piece_);
root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
if (ShapeUtil::GetSubshape(*shape_, index).IsTuple()) {
DCHECK_EQ(src_buf_ptrs.element(index), nullptr)
<< "Tuples should not have buffer pointers";
return;
}
piece->set_buffer(const_cast<char*>(src_buf_ptrs.element(index)));
});
}
} | #include "xla/literal.h"
#include <algorithm>
#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <limits>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/hash/hash.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
class LiteralUtilTest : public ::testing::Test {
protected:
LiteralUtilTest() {
Array4D<float> arr4d({
{
{
{1, 2, 3},
{4, 5, 6},
{7, 8, 9},
},
{
{11, 12, 13},
{14, 15, 16},
{17, 18, 19},
},
},
{
{
{101, 102, 103},
{104, 105, 106},
{107, 108, 109},
},
{
{201, 202, 203},
{204, 205, 206},
{207, 208, 209},
},
},
});
layout_r2_dim0major_ = LayoutUtil::MakeLayout({1, 0});
layout_r2_dim0minor_ = LayoutUtil::MakeLayout({0, 1});
layout_r3_dim0major_ = LayoutUtil::MakeLayout({2, 1, 0});
layout_r3_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2});
layout_r4_dim0major_ = LayoutUtil::MakeLayout({3, 2, 1, 0});
layout_r4_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2, 3});
literal_r4_2x2x3x3_dim0major_ =
LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
layout_r4_dim0major_);
literal_r4_2x2x3x3_dim0minor_ =
LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
layout_r4_dim0minor_);
}
Layout layout_r2_dim0major_;
Layout layout_r2_dim0minor_;
Layout layout_r3_dim0major_;
Layout layout_r3_dim0minor_;
Layout layout_r4_dim0major_;
Layout layout_r4_dim0minor_;
Literal literal_r4_2x2x3x3_dim0major_;
Literal literal_r4_2x2x3x3_dim0minor_;
};
template <typename T>
class LiteralUtilFloatTest : public LiteralUtilTest {};
using FloatTypes =
::testing::Types<float, half, bfloat16, tsl::float8_e3m4, tsl::float8_e4m3,
tsl::float8_e4m3fn, tsl::float8_e4m3fnuz,
tsl::float8_e4m3b11fnuz, tsl::float8_e5m2,
tsl::float8_e5m2fnuz>;
TYPED_TEST_SUITE(LiteralUtilFloatTest, FloatTypes);
TEST_F(LiteralUtilTest, LiteralScalarToString) {
auto true_lit = LiteralUtil::CreateR0<bool>(true);
EXPECT_EQ("pred[] true", true_lit.ToString());
auto false_lit = LiteralUtil::CreateR0<bool>(false);
EXPECT_EQ("pred[] false", false_lit.ToString());
auto u4_lit = LiteralUtil::CreateR0<u4>(u4(5));
EXPECT_EQ("u4[] 5", u4_lit.ToString());
auto u32_lit = LiteralUtil::CreateR0<uint32_t>(42);
EXPECT_EQ("u32[] 42", u32_lit.ToString());
auto s4_lit = LiteralUtil::CreateR0<s4>(s4(-3));
EXPECT_EQ("s4[] -3", s4_lit.ToString());
auto s32_lit = LiteralUtil::CreateR0<int32_t>(-999);
EXPECT_EQ("s32[] -999", s32_lit.ToString());
auto f32_lit = LiteralUtil::CreateR0<float>(3.14f);
EXPECT_EQ("f32[] 3.14", f32_lit.ToString());
auto f16_lit = LiteralUtil::CreateR0<half>(static_cast<half>(0.5f));
EXPECT_EQ("f16[] 0.5", f16_lit.ToString());
auto c64_lit = LiteralUtil::CreateR0<complex64>({3.14f, 2.78f});
EXPECT_EQ("c64[] (3.14, 2.78)", c64_lit.ToString());
auto c128_lit = LiteralUtil::CreateR0<complex128>({3.14, 2.78});
EXPECT_EQ("c128[] (3.14, 2.78)", c128_lit.ToString());
auto bf16_lit = LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(0.5f));
EXPECT_EQ("bf16[] 0.5", bf16_lit.ToString());
auto bf16_lit_truncated =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(3.14f));
ASSERT_EQ("bf16[] 3.141", bf16_lit_truncated.ToString());
auto bf16_lit_truncated2 =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(9.001f));
EXPECT_EQ("bf16[] 9", bf16_lit_truncated2.ToString());
auto f8e5m2_lit =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2(0.5));
EXPECT_EQ("f8e5m2[] 0.5", f8e5m2_lit.ToString());
auto f8e5m2_lit_truncated =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2(3.141));
EXPECT_EQ("f8e5m2[] 3", f8e5m2_lit_truncated.ToString());
auto f8e4m3_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3>(tsl::float8_e4m3(0.5));
EXPECT_EQ("f8e4m3[] 0.5", f8e4m3_lit.ToString());
auto f8e4m3fn_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3fn>(tsl::float8_e4m3fn(0.5));
EXPECT_EQ("f8e4m3fn[] 0.5", f8e4m3fn_lit.ToString());
auto f8e4m3b11fnuz_lit = LiteralUtil::CreateR0<tsl::float8_e4m3b11fnuz>(
tsl::float8_e4m3b11fnuz(0.5));
EXPECT_EQ("f8e4m3b11fnuz[] 0.5", f8e4m3b11fnuz_lit.ToString());
auto f8e4m3fnuz_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3fnuz>(tsl::float8_e4m3fnuz(0.5));
EXPECT_EQ("f8e4m3fnuz[] 0.5", f8e4m3fnuz_lit.ToString());
auto f8e5m2fnuz_lit =
LiteralUtil::CreateR0<tsl::float8_e5m2fnuz>(tsl::float8_e5m2fnuz(0.5));
EXPECT_EQ("f8e5m2fnuz[] 0.5", f8e5m2fnuz_lit.ToString());
auto f8e3m4_lit =
LiteralUtil::CreateR0<tsl::float8_e3m4>(tsl::float8_e3m4(0.5));
EXPECT_EQ("f8e3m4[] 0.5", f8e3m4_lit.ToString());
}
TEST_F(LiteralUtilTest, LiteralVectorToString) {
auto pred_vec = LiteralUtil::CreateR1<bool>({true, false, true});
EXPECT_EQ("pred[3] {1, 0, 1}", pred_vec.ToString());
}
TEST_F(LiteralUtilTest, R2ToString) {
const auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}});
const std::string expected = R"(s32[3,2] {
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R2DynamicToString) {
auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}});
literal.SetDynamicSize(0, {}, 2);
const std::string expected = R"(s32[<=3,2](2,2) {
{ 1, 2 },
{ 3, 4 }
})";
EXPECT_EQ(expected, literal.ToString());
auto literal2 = LiteralUtil::CreateR2({{1, 2, 3}, {4, 5, 6}});
literal2.SetDynamicSize(1, {}, 2);
const std::string expected2 = R"(s32[2,<=3](2,2) {
{ 1, 2 },
{ 4, 5 }
})";
EXPECT_EQ(expected2, literal2.ToString());
}
TEST_F(LiteralUtilTest, R2BoolDynamicToString) {
auto literal = LiteralUtil::CreateR2<bool>(
{{true, true, true}, {true, true, true}, {true, true, true}});
literal.SetDynamicSize(0, {}, 2);
const std::string expected = R"(pred[<=3,3](2,3) {
{ 1, 1, 1 },
{ 1, 1, 1 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R3ToString) {
const auto literal =
LiteralUtil::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});
const std::string expected = R"(s32[3,2,1] {
{
{1},
{2}
},
{
{3},
{4}
},
{
{5},
{6}
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R6ToString) {
const auto literal =
LiteralUtil::CreateFromDimensions(S32, {2, 2, 1, 1, 1, 2});
const std::string expected = R"(s32[2,2,1,1,1,2] {
{
{
{
{
{ 0, 0 }
}
}
},
{
{
{
{ 0, 0 }
}
}
}
},
{
{
{
{
{ 0, 0 }
}
}
},
{
{
{
{ 0, 0 }
}
}
}
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, TupleToString) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
const std::string expected = R"((
f32[] 1,
f32[2,2] {
{ 1, 2 },
{ 3, 4 }
}
))";
EXPECT_EQ(expected, tuple.ToString());
}
TEST_F(LiteralUtilTest, CreateR3FromArray3d) {
Array3D<float> array_3d({
{{1.0f, 2.0f},
{3.0f, 4.0f},
{5.0f, 6.0f}},
{{7.0f, 8.0f},
{9.0f, 10.0f},
{11.0f, 12.0f}},
});
auto literal = LiteralUtil::CreateR3FromArray3D(array_3d);
EXPECT_THAT(literal.shape().dimensions(), ElementsAre(2, 3, 2));
std::string result = literal.ToString();
const std::string expected = R"(f32[2,3,2] {
{
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
},
{
{ 7, 8 },
{ 9, 10 },
{ 11, 12 }
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, LiteralR4F32ProjectedStringifies) {
auto literal = LiteralUtil::CreateR4Projected<float>({
{1, 2},
{1001, 1002},
{2001, 2002},
}, 1, 2);
EXPECT_THAT(literal.shape().dimensions(), ElementsAre(1, 2, 3, 2));
std::string result = literal.ToString();
const std::string expected = R"(f32[1,2,3,2] {
{
{
{ 1, 2 },
{ 1001, 1002 },
{ 2001, 2002 }
},
{
{ 1, 2 },
{ 1001, 1002 },
{ 2001, 2002 }
}
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, LiteralR4F32Stringifies) {
EXPECT_THAT(literal_r4_2x2x3x3_dim0major_.shape().dimensions(),
ElementsAre(2, 2, 3, 3));
std::string result = literal_r4_2x2x3x3_dim0major_.ToString();
const std::string expected = R"(f32[2,2,3,3] {
{
{
{ 1, 2, 3 },
{ 4, 5, 6 },
{ 7, 8, 9 }
},
{
{ 11, 12, 13 },
{ 14, 15, 16 },
{ 17, 18, 19 }
}
},
{
{
{ 101, 102, 103 },
{ 104, 105, 106 },
{ 107, 108, 109 }
},
{
{ 201, 202, 203 },
{ 204, 205, 206 },
{ 207, 208, 209 }
}
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, EachCellR2F32) {
auto literal = LiteralUtil::CreateR2<float>({
{3.1f, 4.2f},
{9.3f, 12.4f},
});
std::vector<std::tuple<int64_t, int64_t, std::string>> seen;
literal.EachCellAsString(
[&seen](absl::Span<const int64_t> indices, const std::string& value) {
seen.emplace_back(indices[0], indices[1], value);
});
using Elem = std::tuple<int64_t, int64_t, std::string>;
std::vector<Elem> expected = {Elem(0, 0, "3.1"), Elem(0, 1, "4.2"),
Elem(1, 0, "9.3"), Elem(1, 1, "12.4")};
EXPECT_EQ(expected, seen);
}
TEST_F(LiteralUtilTest, ScalarEquality) {
auto f32_42 = LiteralUtil::CreateR0<float>(42.0);
auto f32_42_clone = LiteralUtil::CreateR0<float>(42.0);
EXPECT_EQ(f32_42, f32_42);
EXPECT_EQ(f32_42, f32_42_clone);
auto f32_123 = LiteralUtil::CreateR0<float>(123.0);
EXPECT_NE(f32_42, f32_123);
auto f64_42 = LiteralUtil::CreateR0<double>(42.0);
EXPECT_NE(f32_42, f64_42);
}
TEST_F(LiteralUtilTest, NonScalarEquality) {
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_different =
LiteralUtil::CreateR2<float>({{4.0, 3.0}, {1.0, 2.0}});
auto vector_literal = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
auto scalar = LiteralUtil::CreateR0<float>(1.0);
Literal nil(ShapeUtil::MakeNil());
EXPECT_EQ(matrix, matrix);
EXPECT_EQ(matrix, matrix_clone);
EXPECT_NE(matrix, matrix_different);
EXPECT_NE(matrix, vector_literal);
EXPECT_NE(matrix, scalar);
EXPECT_NE(matrix, nil);
EXPECT_EQ(nil, nil);
}
TEST_F(LiteralUtilTest, TokenEquality) {
auto token0 = LiteralUtil::CreateToken();
auto token1 = LiteralUtil::CreateToken();
auto scalar = LiteralUtil::CreateR0<float>(1.0);
EXPECT_EQ(token0, token1);
EXPECT_NE(token0, scalar);
EXPECT_EQ(LiteralUtil::MakeTuple({&token0}),
LiteralUtil::MakeTuple({&token0}));
EXPECT_EQ(LiteralUtil::MakeTuple({&token0, &scalar}),
LiteralUtil::MakeTuple({&token1, &scalar}));
EXPECT_NE(LiteralUtil::MakeTuple({&token0, &scalar}),
LiteralUtil::MakeTuple({&scalar, &token1}));
}
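// Literals holding identical values in different layouts compare equal under
// the default, layout-insensitive equality.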
TEST_F(LiteralUtilTest, DifferentLayoutEquality) {
Literal colmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1}));
colmajor.Set<float>({0, 0}, 1.0);
colmajor.Set<float>({0, 1}, 2.0);
colmajor.Set<float>({1, 0}, 3.0);
colmajor.Set<float>({1, 1}, 4.0);
Literal rowmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0}));
rowmajor.Set<float>({0, 0}, 1.0);
rowmajor.Set<float>({0, 1}, 2.0);
rowmajor.Set<float>({1, 0}, 3.0);
rowmajor.Set<float>({1, 1}, 4.0);
EXPECT_EQ(rowmajor, colmajor);
}
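// ...but they compare unequal once layout sensitivity is enabled in Equal().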
TEST_F(LiteralUtilTest, DifferentLayoutInEquality) {
Literal colmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1}));
colmajor.Set<float>({0, 0}, 1.0);
colmajor.Set<float>({0, 1}, 2.0);
colmajor.Set<float>({1, 0}, 3.0);
colmajor.Set<float>({1, 1}, 4.0);
Literal rowmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0}));
rowmajor.Set<float>({0, 0}, 1.0);
rowmajor.Set<float>({0, 1}, 2.0);
rowmajor.Set<float>({1, 0}, 3.0);
rowmajor.Set<float>({1, 1}, 4.0);
EXPECT_FALSE(rowmajor.Equal(colmajor, true));
EXPECT_FALSE(colmajor.Equal(rowmajor, true));
}
TEST_F(LiteralUtilTest, TupleEquality) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple1 = LiteralUtil::MakeTuple({&scalar, &matrix});
auto scalar_clone = LiteralUtil::CreateR0<float>(1.0);
auto tuple2 = LiteralUtil::MakeTuple({&scalar_clone, &matrix});
EXPECT_EQ(tuple1, tuple2);
auto reversed_tuple = LiteralUtil::MakeTuple({&matrix, &scalar});
EXPECT_NE(tuple1, reversed_tuple);
auto scalar_42 = LiteralUtil::CreateR0<float>(42.0);
auto different_tuple = LiteralUtil::MakeTuple({&scalar_42, &matrix});
EXPECT_NE(tuple1, different_tuple);
}
TEST_F(LiteralUtilTest, DynamicShapeEquality) {
auto r1 = LiteralUtil::CreateR1<float>({1.0, 2.0});
r1.SetDynamicSize(0, {}, 1);
auto r2 = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
r2.SetDynamicSize(0, {}, 1);
auto tuple1 = LiteralUtil::MakeTuple({&r1, &r2});
auto r1_clone = LiteralUtil::CreateR1<float>({1.0, 3.0});
r1_clone.SetDynamicSize(0, {}, 1);
auto tuple2 = LiteralUtil::MakeTuple({&r1_clone, &r2});
EXPECT_EQ(tuple1, tuple2);
auto r2_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
r2_clone.SetDynamicSize(0, {}, 2);
auto tuple_3 = LiteralUtil::MakeTuple({&r1_clone, &r2_clone});
EXPECT_NE(tuple1, tuple_3);
}
TEST_F(LiteralUtilTest, C64Equality) {
auto vector = LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_clone =
LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(vector, vector_clone);
auto vector_reversed =
LiteralUtil::CreateR1<complex64>({{3.0, 4.0}, {1.0, 2.0}});
EXPECT_NE(vector, vector_reversed);
}
TEST_F(LiteralUtilTest, C128Equality) {
auto vector = LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_clone =
LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(vector, vector_clone);
auto vector_reversed =
LiteralUtil::CreateR1<complex128>({{3.0, 4.0}, {1.0, 2.0}});
EXPECT_NE(vector, vector_reversed);
}
TEST_F(LiteralUtilTest, IsAllTuple) {
auto element1 = LiteralUtil::CreateR0<float>(0.0);
auto element2 = LiteralUtil::CreateR2<float>({{0.0, 0.0}, {0.0, 0.0}});
auto tuple = LiteralUtil::MakeTuple({&element1, &element1});
EXPECT_FALSE(tuple.IsAll(0));
EXPECT_FALSE(tuple.IsAll(1));
}
TEST_F(LiteralUtilTest, CreateFromShapeTuple) {
auto scalar = LiteralUtil::CreateR0<float>(0.0);
auto matrix = LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto x = Literal::CreateFromShape(tuple.shape());
EXPECT_EQ(tuple, x);
}
TEST_F(LiteralUtilTest, IsAll) {
EXPECT_TRUE(LiteralUtil::CreateR0<bool>(false).IsAll(0));
EXPECT_TRUE(LiteralUtil::CreateR0<bool>(true).IsAll(1));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAll(1));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAll(2));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(0));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(2));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(-1));
auto int8_min = std::numeric_limits<int8_t>::min();
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(255).IsAll(int8_min));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(42.0).IsAll(42));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(42.0001).IsAll(42));
EXPECT_TRUE(LiteralUtil::CreateR1<int>({100, 100, 100}).IsAll(100));
EXPECT_FALSE(LiteralUtil::CreateR1<double>({100, 100, 100.001}).IsAll(100));
EXPECT_TRUE(LiteralUtil::CreateR2<uint64_t>({{8, 8}, {8, 8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>({{8, 8}, {8, 9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>({{9, 8}, {8, 8}}).IsAll(8));
half h8(8.0f);
half h9(9.0f);
EXPECT_TRUE(LiteralUtil::CreateR2<half>({{h8}, {h8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h8}, {h9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h9}, {h8}}).IsAll(8));
bfloat16 b8(8.0f);
bfloat16 b9(9.0f);
EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b9}, {b8}}).IsAll(8));
bfloat16 b91(9.001f);
bfloat16 b90(9.00f);
EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b91}, {b90}}).IsAll(9.0));
tsl::float8_e5m2 p16(8);
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e5m2>({p16}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e5m2>({p16}).IsAll(9));
tsl::float8_e4m3 q16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3>({q16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3>({q16}).IsAll(9));
tsl::float8_e4m3fn r16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3fn>({r16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3fn>({r16}).IsAll(9));
tsl::float8_e4m3b11fnuz s16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3b11fnuz>({s16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3b11fnuz>({s16}).IsAll(9));
tsl::float8_e4m3fnuz t16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3fnuz>({t16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3fnuz>({t16}).IsAll(9));
tsl::float8_e5m2fnuz u16(8);
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e5m2fnuz>({u16}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e5m2fnuz>({u16}).IsAll(9));
tsl::float8_e3m4 v16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e3m4>({v16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e3m4>({v16}).IsAll(9));
complex64 c8_9 = {8, 9};
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}}).IsAll(8));
auto uint64_max = std::numeric_limits<uint64_t>::max();
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>(
{{uint64_max, uint64_max}, {uint64_max, uint64_max}})
.IsAll(-1));
}
TEST_F(LiteralUtilTest, IsAllFloat) {
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int8_t>(0).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(0).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(.5).IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(-.5).IsAllFloat(-.5));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(-.5).IsAllFloat(-.49));
EXPECT_FALSE(
LiteralUtil::CreateR2<float>({{0, 0, 0}, {0, .1, 0}}).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR2<float>({{.5, .5, .5}, {.5, .5, .5}})
.IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(.5).IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(-.5).IsAllFloat(-.5));
EXPECT_FALSE(LiteralUtil::CreateR0<double>(-.5).IsAllFloat(-.49));
EXPECT_FALSE(
LiteralUtil::CreateR2<double>({{0, 0, 0}, {0, .1, 0}}).IsAllFloat(0));
EXPECT_TRUE(
LiteralUtil::CreateR0<bfloat16>(bfloat16(128.)).IsAllFloat(128.5));
}
TEST_F(LiteralUtilTest, IsAllComplex) {
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int8_t>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<double>(0).IsAllComplex(0));
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}})
.IsAllComplex({8.0f, 9.0f}));
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}})
.IsAllComplex({8.0f, 9.0f}));
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c7_9}})
.IsAllComplex({8.0f, 9.0f}));
}
TEST_F(LiteralUtilTest, IsAllFirst) {
EXPECT_FALSE(LiteralUtil::CreateR1<bool>({false, true}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<bool>({false, false}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<int8_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<int8_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<uint8_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<int32_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<int32_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<uint32_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<uint32_t>({1, 1, 2}).IsAllFirst());
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}}).IsAllFirst());
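  // The vectors below hold distinct subnormal floats; where available, turn on
  // denormals-are-zero (DAZ) so IsAllFirst is exercised in a mode where a
  // plain floating-point compare would conflate them.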
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
int old_csr = _mm_getcsr();
_mm_setcsr(old_csr | _MM_DENORMALS_ZERO_ON);
#endif
bool eq0 = LiteralUtil::CreateR1<float>({0.0, 1.401298e-45}).IsAllFirst();
bool eq1 = LiteralUtil::CreateR1<float>({0.0, 2.802597e-45}).IsAllFirst();
bool eq2 =
LiteralUtil::CreateR1<float>({4.203895e-45, 7.006492e-45}).IsAllFirst();
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
_mm_setcsr(old_csr);
#endif
EXPECT_FALSE(eq0);
EXPECT_FALSE(eq1);
EXPECT_FALSE(eq2);
}
TEST_F(LiteralUtilTest, CountEqualInt) {
EXPECT_EQ(LiteralUtil::CreateR1<int8_t>({}).CountEqual<int8_t>(1), 0);
EXPECT_EQ(
LiteralUtil::CreateR1<int8_t>({1, 2, 3, 4, 5, 100}).CountEqual<int8_t>(2),
1);
EXPECT_EQ(LiteralUtil::CreateR1<int8_t>({0, 3, 6, 0, 9, 18, 0})
.CountEqual<int8_t>(0),
3);
EXPECT_EQ(LiteralUtil::CreateR1<int32_t>({234, 345, 4, 45, 5467, 5467, 5467})
.CountEqual<int32_t>(5467),
3);
}
TEST_F(LiteralUtilTest, CountEqualFloat) {
EXPECT_EQ(LiteralUtil::CreateR1<float>({}).CountEqual<float>(0), 0);
EXPECT_EQ(LiteralUtil::CreateR1<float>({1.1, 2.2, 3.3, 4.4, 5.5, 100.6})
.CountEqual<float>(3.3),
1);
EXPECT_EQ(LiteralUtil::CreateR1<float>({7.62, 3, 7.75, 7.62, 7.3, 2, 7.62})
.CountEqual<float>(7.62),
3);
EXPECT_EQ(LiteralUtil::CreateR1<float>(
{NAN, 0, 6.8, NAN, NAN, NAN, 63.12, 24.6, NAN})
.CountEqual<float>(NAN),
5);
}
TEST_F(LiteralUtilTest, CountEqualBool) {
EXPECT_EQ(LiteralUtil::CreateR1<bool>({false, true}).CountEqual<bool>(false),
1);
}
TEST_F(LiteralUtilTest, CountEqualComplex) {
EXPECT_EQ(LiteralUtil::CreateR1<std::complex<double>>(
{std::complex<float>(1, 2), std::complex<float>(3, 4),
std::complex<float>(5, 6), std::complex<float>(6, 7)})
.CountEqual<float>(std::complex<float>(5, 6)),
1);
}
TEST_F(LiteralUtilTest, CountEqualMismatched) {
EXPECT_EQ(LiteralUtil::CreateR1<float>({13, 10.5, 15.6, 22.7})
.CountEqual<int8_t>(13),
1);
EXPECT_EQ(
LiteralUtil::CreateR1<float>({10.5, 15.6, 22.7}).CountEqual<int8_t>(1),
0);
EXPECT_EQ(LiteralUtil::CreateR1<std::complex<float>>(
{std::complex<float>(1, 2), std::complex<float>(3, 4),
std::complex<float>(5, 6), std::complex<float>(6, 7)})
.CountEqual<float>(1),
0);
}
TEST_F(LiteralUtilTest, IsZero) {
auto scalar_zero = LiteralUtil::CreateR0<float>(0.0f);
auto scalar_one = LiteralUtil::CreateR0<float>(1.0f);
EXPECT_TRUE(scalar_zero.IsZero({}));
EXPECT_FALSE(scalar_one.IsZero({}));
auto array = LiteralUtil::CreateR2<uint32_t>({{1, 2, 0, 3}, {1, 0, 1, 2}});
EXPECT_FALSE(array.IsZero({0, 1}));
EXPECT_TRUE(array.IsZero({0, 2}));
EXPECT_TRUE(array.IsZero({1, 1}));
EXPECT_FALSE(array.IsZero({1, 2}));
auto complex_zero = LiteralUtil::CreateR0<complex64>(0.0f);
auto complex_nonzero = LiteralUtil::CreateR0<complex64>(0.5f);
EXPECT_TRUE(complex_zero.IsZero({}));
EXPECT_FALSE(complex_nonzero.IsZero({}));
}
template <typename T>
class LiteralUtilTestTemplated : public ::testing::Test {};
using TestedTypes = ::testing::Types<float, int32_t, uint32_t, complex64>;
class TestNamer {
public:
template <typename TypeParam>
static std::string GetName(int) {
return ::testing::internal::GetTypeName<TypeParam>();
}
};
TYPED_TEST_SUITE(LiteralUtilTestTemplated, TestedTypes, TestNamer);
TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) {
TypeParam half = TypeParam(1) / TypeParam(2);
auto data = LiteralUtil::CreateR2<TypeParam>({{half, 2}, {3, 4}});
const Layout layout01 = LayoutUtil::MakeLayout({0, 1});
const Layout layout10 = LayoutUtil::MakeLayout({1, 0});
auto data01 = data.Relayout(layout01);
EXPECT_TRUE(LayoutUtil::Equal(data01.shape().layout(), layout01));
EXPECT_EQ(data, data01);
auto data10 = data.Relayout(layout10);
EXPECT_TRUE(LayoutUtil::Equal(data10.shape().layout(), layout10));
EXPECT_EQ(data, data10);
}
TEST_F(LiteralUtilTest, ReshapeR0) {
auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original.Reshape({}).value();
EXPECT_EQ(original, reshape);
}
TEST_F(LiteralUtilTest, ReshapeR4) {
auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
}, layout_r3_dim0major_);
auto reshape = original.Reshape({3, 4, 2}).value();
EXPECT_EQ(expected, reshape);
}
TEST_F(LiteralUtilTest, ReshapeR4Dim0Minor) {
auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0minor_);
auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
}, layout_r3_dim0major_);
auto reshape = original.Reshape({3, 4, 2}).value();
EXPECT_EQ(expected, reshape);
}
TEST_F(LiteralUtilTest, TransposeR0) {
auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original.Transpose({});
EXPECT_EQ(original, reshape);
}
TEST_F(LiteralUtilTest, TransposeR4) {
auto original = LiteralUtil::CreateR4<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}});
auto reshape = original.Transpose({2, 3, 0, 1});
reshape.EachCell<float>([&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>(
{indices[2], indices[3], indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, TransposeDynamicR2) {
auto original = LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}});
original.SetDynamicSize(1, 1);
auto reshape = original.Transpose({1, 0});
reshape.EachCell<float>([&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[1], indices[0]}));
});
}
TEST_F(LiteralUtilTest, ToStaticR2) {
auto original = LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}});
original.SetDynamicSize(1, 1);
auto static_literal = original.ToStatic();
EXPECT_EQ(static_literal.shape(), ShapeUtil::MakeShape(F32, {2, 1}));
EXPECT_TRUE(static_literal.shape().is_static());
static_literal.EachCell<float>(
[&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, ToBoundedDynamicR2) {
auto original = LiteralUtil::CreateR2<float>({{1}, {4}});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3}, {false, true});
auto dynamic_literal = original.ToBoundedDynamic(dynamic_shape);
EXPECT_EQ(dynamic_literal.shape(), dynamic_shape);
dynamic_literal.EachCell<float>(
[&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, TestR4RelayoutEquivalence) {
auto dim0minor_relaid_to_dim0major =
literal_r4_2x2x3x3_dim0minor_.Relayout(layout_r4_dim0major_);
EXPECT_EQ(literal_r4_2x2x3x3_dim0major_, dim0minor_relaid_to_dim0major);
auto dim0major_relaid_to_dim0minor =
literal_r4_2x2x3x3_dim0major_.Relayout(layout_r4_dim0minor_);
EXPECT_EQ(literal_r4_2x2x3x3_dim0minor_, dim0major_relaid_to_dim0minor);
}
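// Adapter that lets absl::HashOf drive Literal::Hash with a chosen layout
// sensitivity, so layout-sensitive and -insensitive hashes can be compared.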
template <bool kIsLayoutSensitive>
struct HashTester {
template <typename H>
friend H AbslHashValue(H h, const HashTester& key) {
    return Literal::Hash<H, kIsLayoutSensitive, /*kByteLimit=*/64>(
        std::move(h), *key.literal);
}
const Literal* literal;
};
TEST_F(LiteralUtilTest, TestR2LinearLayout) {
auto mat_dim0minor = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0minor_);
EXPECT_EQ(mat_dim0minor.element_count(), 6);
EXPECT_THAT(mat_dim0minor.data<int32_t>(), ElementsAre(1, 4, 2, 5, 3, 6));
auto relaid_mat_to_dim0major = mat_dim0minor.Relayout(layout_r2_dim0major_);
EXPECT_THAT(relaid_mat_to_dim0major.data<int32_t>(),
ElementsAre(1, 2, 3, 4, 5, 6));
EXPECT_EQ(absl::HashOf(HashTester<false>{&mat_dim0minor}),
absl::HashOf(HashTester<false>{&relaid_mat_to_dim0major}));
auto mat_dim0major = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0major_);
EXPECT_EQ(mat_dim0major.element_count(), 6);
EXPECT_THAT(mat_dim0major.data<int32_t>(), ElementsAre(1, 2, 3, 4, 5, 6));
auto relaid_mat_to_dim0minor = mat_dim0major.Relayout(layout_r2_dim0minor_);
EXPECT_THAT(relaid_mat_to_dim0minor.data<int32_t>(),
ElementsAre(1, 4, 2, 5, 3, 6));
EXPECT_EQ(absl::HashOf(HashTester<false>{&mat_dim0major}),
absl::HashOf(HashTester<false>{&relaid_mat_to_dim0minor}));
EXPECT_EQ(absl::HashOf(HashTester<true>{&mat_dim0minor}),
absl::HashOf(HashTester<true>{&relaid_mat_to_dim0minor}));
EXPECT_EQ(absl::HashOf(HashTester<true>{&mat_dim0major}),
absl::HashOf(HashTester<true>{&relaid_mat_to_dim0major}));
}
TEST_F(LiteralUtilTest, TestR3LinearLayout) {
Array3D<int> arr3d(
{
{
{1, 2, 3},
{4, 5, 6},
},
{
{7, 8, 9},
{10, 11, 12},
},
});
auto lit_dim0minor = LiteralUtil::CreateR3FromArray3DWithLayout<int>(
arr3d, layout_r3_dim0minor_);
EXPECT_EQ(lit_dim0minor.element_count(), 12);
std::vector<int> expected_dim0minor{1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12};
EXPECT_THAT(lit_dim0minor.data<int32_t>(),
testing::ElementsAreArray(expected_dim0minor));
auto relaid_lit_to_dim0major = lit_dim0minor.Relayout(layout_r3_dim0major_);
std::vector<int> expected_dim0major{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
EXPECT_THAT(relaid_lit_to_dim0major.data<int32_t>(),
testing::ElementsAreArray(expected_dim0major));
auto lit_dim0major = LiteralUtil::CreateR3FromArray3DWithLayout<int>(
arr3d, layout_r3_dim0major_);
EXPECT_EQ(lit_dim0major.element_count(), 12);
EXPECT_THAT(lit_dim0major.data<int32_t>(),
testing::ElementsAreArray(expected_dim0major));
auto relaid_lit_to_dim0minor = lit_dim0major.Relayout(layout_r3_dim0minor_);
EXPECT_THAT(relaid_lit_to_dim0minor.data<int32_t>(),
testing::ElementsAreArray(expected_dim0minor));
}
TEST_F(LiteralUtilTest, SliceR0S32) {
auto input = LiteralUtil::CreateR0<int32_t>(1);
auto result = input.Slice({}, {});
EXPECT_EQ(input, result);
}
TEST_F(LiteralUtilTest, SliceR1F32) {
auto input = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0, 5.0});
auto result = input.Slice({3}, {4});
auto expected = LiteralUtil::CreateR1<float>({4.0});
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, SliceR2U32) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
auto result = input_3x4.Slice({0, 2}, {2, 4});
auto expected = LiteralUtil::CreateR2<uint32_t>({{3, 4}, {7, 8}});
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, SliceR3U32Full) {
auto input_2x3x2 = LiteralUtil::CreateR3<uint32_t>(
{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}});
auto result = input_2x3x2.Slice({0, 0, 0}, {2, 3, 2});
EXPECT_EQ(input_2x3x2, result);
}
TEST_F(LiteralUtilTest, SliceR2Dynamic) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
input_3x4.SetDynamicSize(1, 3);
auto result = input_3x4.Slice({0, 1}, {2, 2});
auto expected = LiteralUtil::CreateR2<uint32_t>({{2}, {6}});
EXPECT_EQ(expected, result);
EXPECT_EQ(result.GetDynamicSize(1), 1);
}
TEST_F(LiteralUtilTest, SliceR2DynamicInBound) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
input_3x4.SetDynamicSize(1, 1);
auto result = input_3x4.Slice({0, 0}, {2, 2});
auto expected = LiteralUtil::CreateR2<uint32_t>({{1}, {5}});
EXPECT_EQ(expected, result);
EXPECT_EQ(result.GetDynamicSize(1), 1);
}
TEST_F(LiteralUtilTest, SliceR2DynamicOutOfBound) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
input_3x4.SetDynamicSize(1, 1);
auto result = input_3x4.Slice({0, 1}, {2, 3});
auto expected = LiteralUtil::CreateR2<uint32_t>({{}, {}});
EXPECT_EQ(expected, result);
EXPECT_EQ(result.GetDynamicSize(1), 0);
}
TEST_F(LiteralUtilTest, PopulateR1S64) {
Literal output(ShapeUtil::MakeShape(S64, {1}));
output.PopulateR1<int64_t>({77});
auto expected = LiteralUtil::CreateR1<int64_t>({77});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR1U64) {
Literal output(ShapeUtil::MakeShape(U64, {2}));
output.PopulateR1<uint64_t>({{77, 88}});
auto expected = LiteralUtil::CreateR1<uint64_t>({{77, 88}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR1C64) {
Literal output(ShapeUtil::MakeShape(C64, {1}));
output.PopulateR1<complex64>({{77, 88}});
auto expected = LiteralUtil::CreateR1<complex64>({{77, 88}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR1C128) {
Literal output(ShapeUtil::MakeShape(C128, {1}));
output.PopulateR1<complex128>({{77, 88}});
auto expected = LiteralUtil::CreateR1<complex128>({{77, 88}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR2C64) {
Literal output(ShapeUtil::MakeShape(C64, {2, 2}));
output.PopulateR2<complex64>({{{7, 8}, {9, 10}}, {{1, 2}, {3, 4}}});
auto expected =
LiteralUtil::CreateR2<complex64>({{{7, 8}, {9, 10}}, {{1, 2}, {3, 4}}});
EXPECT_EQ(output, expected);
}
TYPED_TEST(LiteralUtilFloatTest, PopulateWithValueR0Float) {
Literal output(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<TypeParam>(), {}));
TypeParam h(0.25f);
output.PopulateWithValue<TypeParam>(h);
auto expected = LiteralUtil::CreateR0<TypeParam>(h);
EXPECT_EQ(output, expected);
}
TYPED_TEST(LiteralUtilFloatTest, PopulateWithValueR1Float) {
Literal output(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<TypeParam>(), {3}));
TypeParam h(0.5f);
output.PopulateWithValue<TypeParam>(h);
auto expected = LiteralUtil::CreateR1<TypeParam>({h, h, h});
EXPECT_EQ(output, expected);
}
TYPED_TEST(LiteralUtilFloatTest, PopulateWithValueR2Float) {
Literal output(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<TypeParam>(), {2, 2}));
TypeParam h(2.0f);
output.PopulateWithValue<TypeParam>(h);
auto expected = LiteralUtil::CreateR2<TypeParam>({{h, h}, {h, h}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR1S64) {
Literal output(ShapeUtil::MakeShape(S64, {3}));
output.PopulateWithValue<int64_t>(-7);
auto expected = LiteralUtil::CreateR1<int64_t>({-7, -7, -7});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR2U64) {
Literal output(ShapeUtil::MakeShape(U64, {2, 2}));
output.PopulateWithValue<uint64_t>(42);
auto expected = LiteralUtil::CreateR2<uint64_t>({{42, 42}, {42, 42}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR2C64) {
Literal output(ShapeUtil::MakeShape(C64, {2, 2}));
output.PopulateWithValue<complex64>({4, 2});
auto expected =
LiteralUtil::CreateR2<complex64>({{{4, 2}, {4, 2}}, {{4, 2}, {4, 2}}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR2C128) {
Literal output(ShapeUtil::MakeShape(C128, {2, 2}));
output.PopulateWithValue<complex128>({4, 2});
auto expected =
LiteralUtil::CreateR2<complex128>({{{4, 2}, {4, 2}}, {{4, 2}, {4, 2}}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, ReplicateR2U32) {
auto input = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
auto output = input.Replicate<uint32_t>(3);
auto expected = LiteralUtil::CreateR3<uint32_t>(
{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, CopySliceFrom) {
const int64_t dimensions[] = {17, 15, 34, 21};
const int64_t layouts[][4] = {
{3, 2, 1, 0}, {0, 2, 1, 3}, {0, 1, 2, 3}, {2, 0, 3, 1}, {1, 3, 0, 2}};
for (const auto& layout : layouts) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<uint32_t>(), dimensions, layout);
auto source = Literal::CreateFromShape(shape);
const int64_t zero_base[] = {0, 0, 0, 0};
const int64_t step[] = {1, 1, 1, 1};
uint32_t seqnr = 0;
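    // Fill the source with a distinct nonzero value per element so that copied
    // regions can be verified unambiguously.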
auto init_proc = [&](absl::Span<const int64_t> indexes) {
source.Set(indexes, ++seqnr);
return true;
};
ShapeUtil::ForEachIndex(source.shape(), zero_base, dimensions, step,
init_proc);
auto blank = Literal::CreateFromShape(shape);
const int64_t src_base[] = {3, 1, 5, 7};
const int64_t dest_base[] = {6, 4, 12, 2};
const int64_t copy_size[] = {7, 8, 11, 9};
TF_EXPECT_OK(blank.CopySliceFrom(source, src_base, dest_base, copy_size));
std::vector<int64_t> source_indexes(TF_ARRAYSIZE(dimensions), 0);
std::vector<int64_t> blank_indexes(TF_ARRAYSIZE(dimensions), 0);
bool matched = true;
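    // Each element of the copied window must equal the corresponding source
    // element, offset by the source and destination bases.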
auto check_proc = [&](absl::Span<const int64_t> indexes) {
std::copy(indexes.begin(), indexes.end(), source_indexes.begin());
std::transform(source_indexes.begin(), source_indexes.end(), src_base,
source_indexes.begin(), std::plus<int64_t>());
std::copy(indexes.begin(), indexes.end(), blank_indexes.begin());
std::transform(blank_indexes.begin(), blank_indexes.end(), dest_base,
blank_indexes.begin(), std::plus<int64_t>());
auto bval = blank.Get<uint32_t>(blank_indexes);
matched = (bval != 0 && bval == source.Get<uint32_t>(source_indexes));
return matched;
};
ShapeUtil::ForEachIndex(source.shape(), zero_base, copy_size, step,
check_proc);
EXPECT_TRUE(matched);
}
}
TEST_F(LiteralUtilTest, CopyFromScalars) {
auto zero = LiteralUtil::CreateR0<uint32_t>(0);
auto nine = LiteralUtil::CreateR0<uint32_t>(9);
TF_EXPECT_OK(zero.CopyFrom(nine));
EXPECT_EQ(zero, nine);
auto vect = LiteralUtil::CreateR1<uint32_t>({3, 4, 9, 12, 5, 17, 21});
TF_EXPECT_OK(zero.CopySliceFrom(vect, {5}, {}, {}));
EXPECT_EQ(zero.Get<uint32_t>({}), 17);
TF_EXPECT_OK(vect.CopySliceFrom(zero, {}, {4}, {}));
EXPECT_EQ(vect.Get<uint32_t>({4}), 17);
}
TEST_F(LiteralUtilTest, CopyFromAndToZeroElement) {
const Shape empty_r1_shape = ShapeUtil::MakeShape(F32, {0});
const auto const_nine = LiteralUtil::CreateR1<float>({9});
const auto const_empty = Literal::CreateFromShape(empty_r1_shape);
{
const auto empty = Literal::CreateFromShape(empty_r1_shape);
auto nine = LiteralUtil::CreateR1<float>({9});
TF_EXPECT_OK(nine.CopySliceFrom(empty, {0}, {0}, {0}));
EXPECT_EQ(nine, const_nine);
}
{
auto empty = Literal::CreateFromShape(empty_r1_shape);
auto nine = LiteralUtil::CreateR1<float>({9});
TF_EXPECT_OK(empty.CopySliceFrom(nine, {0}, {0}, {0}));
EXPECT_EQ(empty, const_empty);
}
}
TEST_F(LiteralUtilTest, CopyFromNilShape) {
Literal nil_literal0(ShapeUtil::MakeNil());
Literal nil_literal1(ShapeUtil::MakeNil());
TF_ASSERT_OK(nil_literal0.CopyFrom(nil_literal1));
}
TEST_F(LiteralUtilTest, CopyFromArrays) {
auto scalar_42 = LiteralUtil::CreateR0<float>(42.0);
auto scalar_123 = LiteralUtil::CreateR0<float>(123.0);
EXPECT_NE(scalar_42, scalar_123);
  TF_ASSERT_OK(scalar_42.CopyFrom(scalar_123, /*dest_shape_index=*/{},
                                  /*src_shape_index=*/{}));
EXPECT_EQ(scalar_42, scalar_123);
EXPECT_EQ(scalar_42.Get<float>({}), 123.0f);
auto matrix_1234 = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_5678 = LiteralUtil::CreateR2<float>({{5.0, 6.0}, {7.0, 8.0}});
EXPECT_NE(matrix_1234, matrix_5678);
EXPECT_EQ(matrix_1234.Get<float>({0, 0}), 1.0f);
  TF_ASSERT_OK(matrix_1234.CopyFrom(matrix_5678, /*dest_shape_index=*/{},
                                    /*src_shape_index=*/{}));
EXPECT_EQ(matrix_1234, matrix_5678);
EXPECT_EQ(matrix_1234.Get<float>({0, 0}), 5.0f);
}
TEST_F(LiteralUtilTest, CopyFromTuples) {
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
Literal nil_literal(ShapeUtil::MakeNil());
Literal inner_elements[] = {LiteralUtil::CreateR0<int32_t>(42),
LiteralUtil::CreateR1<double>({23.0, 44.0})};
Literal inner_tuple = LiteralUtil::MakeTuple(
{&inner_elements[0], &inner_elements[1], &nil_literal});
Literal nested_tuple = LiteralUtil::MakeTuple({&matrix, &inner_tuple});
Literal int32_minus5 = LiteralUtil::CreateR0<int32_t>(-5);
Literal double_2_4 = LiteralUtil::CreateR1<double>({2.0, 4.0});
Literal tuple =
LiteralUtil::MakeTuple({&int32_minus5, &double_2_4, &nil_literal});
EXPECT_EQ(matrix, LiteralSlice(nested_tuple, {0}));
EXPECT_EQ(nested_tuple.Get<int32_t>({}, {1, 0}), 42);
EXPECT_EQ(nested_tuple.Get<double>({0}, {1, 1}), 23.0);
EXPECT_EQ(nested_tuple.Get<double>({1}, {1, 1}), 44.0);
  TF_ASSERT_OK(nested_tuple.CopyFrom(tuple, /*dest_shape_index=*/{1},
                                     /*src_shape_index=*/{}));
EXPECT_EQ(matrix, LiteralSlice(nested_tuple, {0}));
EXPECT_EQ(nested_tuple.Get<int32_t>({}, {1, 0}), -5);
EXPECT_EQ(nested_tuple.Get<double>({0}, {1, 1}), 2.0);
EXPECT_EQ(nested_tuple.Get<double>({1}, {1, 1}), 4.0);
}
TEST_F(LiteralUtilTest, CopyBetweenSameTuple) {
Literal elements[] = {LiteralUtil::CreateR0<int32_t>(-2),
LiteralUtil::CreateR0<int32_t>(4)};
Literal tuple = LiteralUtil::MakeTuple({&elements[0], &elements[1]});
EXPECT_EQ(tuple.Get<int32_t>({}, {0}), -2);
EXPECT_EQ(tuple.Get<int32_t>({}, {1}), 4);
  TF_ASSERT_OK(tuple.CopyFrom(tuple, /*dest_shape_index=*/{1},
                              /*src_shape_index=*/{0}));
EXPECT_EQ(tuple.Get<int32_t>({}, {0}), -2);
EXPECT_EQ(tuple.Get<int32_t>({}, {1}), -2);
}
TEST_F(LiteralUtilTest, CopyFromDifferentShapes) {
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto vector = LiteralUtil::CreateR1<float>({5.0, 7.0});
absl::Status status = matrix.CopyFrom(vector);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Destination subshape incompatible"));
}
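// A freshly created F16 literal is zero-initialized; the IEEE half encodings
// of 1.0 and 2.0 are 0x3C00 and 0x4000.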
TEST_F(LiteralUtilTest, F16) {
Literal m1 = Literal::CreateFromShape(ShapeUtil::MakeShape(F16, {2, 2}));
const char* d1 = reinterpret_cast<const char*>(m1.data<half>().data());
EXPECT_EQ(d1[0], 0);
EXPECT_EQ(d1[1], 0);
EXPECT_EQ(d1[2], 0);
EXPECT_EQ(d1[3], 0);
EXPECT_EQ(d1[4], 0);
EXPECT_EQ(d1[5], 0);
EXPECT_EQ(d1[6], 0);
EXPECT_EQ(d1[7], 0);
half h1(1.0f);
half h2(2.0f);
auto m2 = LiteralUtil::CreateR2<half>({{h1, h2}, {h2, h1}});
const uint16_t* d2 =
reinterpret_cast<const uint16_t*>(m2.data<half>().data());
EXPECT_EQ(d2[0], 0x3C00);
EXPECT_EQ(d2[1], 0x4000);
EXPECT_EQ(d2[2], 0x4000);
EXPECT_EQ(d2[3], 0x3C00);
}
TEST_F(LiteralUtilTest, Populate) {
struct PopulateData {
std::vector<int64_t> dimensions;
std::vector<int64_t> layout;
} populate_data[] = {
{{}, {}},
{{0}, {0}},
{{16}, {0}},
{{2, 0}, {1, 0}},
{{4, 16}, {1, 0}},
{{21, 12}, {0, 1}},
{{6, 11, 17}, {2, 0, 1}},
{{6, 11, 5, 17}, {3, 2, 0, 1}},
};
for (const auto& data : populate_data) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<uint32_t>(), data.dimensions,
data.layout);
Literal literal(shape);
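    // Assign each element its linear index (under the literal's layout) plus
    // an arbitrary offset, making values unique and layout-dependent.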
auto generator = [&](absl::Span<const int64_t> indexes) -> uint32_t {
return IndexUtil::MultidimensionalIndexToLinearIndex(literal.shape(),
indexes) +
17;
};
TF_EXPECT_OK(literal.Populate<uint32_t>(generator));
std::vector<int64_t> zero_base(data.dimensions.size(), 0);
std::vector<int64_t> step(data.dimensions.size(), 1);
bool matched = true;
auto check_function = [&](absl::Span<const int64_t> indexes) {
auto value = literal.Get<uint32_t>(indexes);
matched = matched && (value == generator(indexes));
return matched;
};
ShapeUtil::ForEachIndex(literal.shape(), zero_base, data.dimensions, step,
check_function);
EXPECT_TRUE(matched);
}
}
TEST_F(LiteralUtilTest, PopulateParallel) {
struct PopulateData {
std::vector<int64_t> dimensions;
std::vector<int64_t> layout;
} populate_data[] = {
{{}, {}},
{{0}, {0}},
{{16}, {0}},
{{2, 0}, {1, 0}},
{{4, 16}, {1, 0}},
{{21, 12}, {0, 1}},
{{6, 11, 17}, {2, 0, 1}},
{{6, 11, 5, 17}, {3, 2, 0, 1}},
};
for (const auto& data : populate_data) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<uint32_t>(), data.dimensions,
data.layout);
Literal literal(shape);
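    // Same generator as the serial Populate test; PopulateParallel also passes
    // a thread id, which is ignored here.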
auto generator = [&](absl::Span<const int64_t> indexes,
int ) -> uint32_t {
return IndexUtil::MultidimensionalIndexToLinearIndex(literal.shape(),
indexes) +
17;
};
TF_EXPECT_OK(literal.PopulateParallel<uint32_t>(generator));
std::vector<int64_t> zero_base(data.dimensions.size(), 0);
std::vector<int64_t> step(data.dimensions.size(), 1);
bool matched = true;
auto check_function = [&](absl::Span<const int64_t> indexes) {
auto value = literal.Get<uint32_t>(indexes);
matched = matched && (value == generator(indexes, -1));
return matched;
};
ShapeUtil::ForEachIndex(literal.shape(), zero_base, data.dimensions, step,
check_function);
EXPECT_TRUE(matched);
}
}
TEST_F(LiteralUtilTest, ConvertR4) {
auto original = LiteralUtil::CreateR4WithLayout<int8_t>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
auto expected = LiteralUtil::CreateR4WithLayout<uint32_t>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
TF_ASSERT_OK_AND_ASSIGN(Literal converted, original.Convert(U32));
EXPECT_EQ(expected, converted);
}
TEST_F(LiteralUtilTest, ConvertIfTypesMatch) {
auto s8 = LiteralUtil::CreateR4WithLayout<int8_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto s16 = LiteralUtil::CreateR4WithLayout<int16_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto s32 = LiteralUtil::CreateR4WithLayout<int32_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto u16 = LiteralUtil::CreateR4WithLayout<uint16_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto u32 = LiteralUtil::CreateR4WithLayout<uint32_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto s64 = LiteralUtil::CreateR4WithLayout<int64_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto u64 = LiteralUtil::CreateR4WithLayout<uint64_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto pred = LiteralUtil::CreateR4WithLayout<bool>({{
{{true, false, true, false}, {false, true, false, true}},
{{false, true, false, true}, {true, false, true, false}},
{{true, false, true, false}, {false, true, false, true}},
}}, layout_r4_dim0major_);
auto int32_pred = LiteralUtil::CreateR4WithLayout<int32_t>({{
{{1, 0, 1, 0}, {0, 1, 0, 1}},
{{0, 1, 0, 1}, {1, 0, 1, 0}},
{{1, 0, 1, 0}, {0, 1, 0, 1}},
}}, layout_r4_dim0major_);
auto s4nums = LiteralUtil::CreateR4WithLayout<s4>({{
{{s4(1), s4(0), s4(2), s4(0)}, {s4(0), s4(5), s4(0), s4(7)}},
{{s4(0), s4(1), s4(0), s4(1)}, {s4(2), s4(0), s4(4), s4(0)}},
{{s4(2), s4(0), s4(2), s4(0)}, {s4(0), s4(3), s4(0), s4(3)}},
}}, layout_r4_dim0major_);
auto int32_s4nums = LiteralUtil::CreateR4WithLayout<int32_t>({{
{{1, 0, 2, 0}, {0, 5, 0, 7}},
{{0, 1, 0, 1}, {2, 0, 4, 0}},
{{2, 0, 2, 0}, {0, 3, 0, 3}},
}}, layout_r4_dim0major_);
auto f16 = LiteralUtil::CreateR4WithLayout<half>({{
{{half(10.0), half(0.0), half(12.0), half(0.0)},
{half(0.0), half(15.0), half(0.0), half(17.0)}},
{{half(0.0), half(19.0), half(0.0), half(21.0)},
{half(22.0), half(0.0), half(24.0), half(0.0)}},
{{half(26.0), half(0.0), half(28.0), half(0.0)},
{half(0.0), half(31.0), half(0.0), half(33.0)}},
}}, layout_r4_dim0major_);
auto bf16 = LiteralUtil::CreateR4WithLayout<bfloat16>({{
{{bfloat16(10.0), bfloat16(0.0), bfloat16(12.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(15.0), bfloat16(0.0), bfloat16(17.0)}},
{{bfloat16(0.0), bfloat16(19.0), bfloat16(0.0), bfloat16(21.0)},
{bfloat16(22.0), bfloat16(0.0), bfloat16(24.0), bfloat16(0.0)}},
{{bfloat16(26.0), bfloat16(0.0), bfloat16(28.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(31.0), bfloat16(0.0), bfloat16(33.0)}},
}}, layout_r4_dim0major_);
auto f32 = LiteralUtil::CreateR4WithLayout<float>({{
{{10.0f, 0.0f, 12.0f, 0.0f}, {0.0f, 15.0f, 0.0f, 17.0f}},
{{0.0f, 19.0f, 0.0f, 21.0f}, {22.0f, 0.0f, 24.0f, 0.0f}},
{{26.0f, 0.0f, 28.0f, 0.0f}, {0.0f, 31.0f, 0.0f, 33.0f}},
}}, layout_r4_dim0major_);
auto f64 = LiteralUtil::CreateR4WithLayout<double>({{
{{10.0, 0.0, 12.0, 0.0}, {0.0, 15.0, 0.0, 17.0}},
{{0.0, 19.0, 0.0, 21.0}, {22.0, 0.0, 24.0, 0.0}},
{{26.0, 0.0, 28.0, 0.0}, {0.0, 31.0, 0.0, 33.0}},
}}, layout_r4_dim0major_);
auto c64 = LiteralUtil::CreateR4WithLayout<complex64>({{
{{10.0f, 0.0f, 12.0f, 0.0f}, {0.0f, 15.0f, 0.0f, 17.0f}},
{{0.0f, 19.0f, 0.0f, 21.0f}, {22.0f, 0.0f, 24.0f, 0.0f}},
{{26.0f, 0.0f, 28.0f, 0.0f}, {0.0f, 31.0f, 0.0f, 33.0f}},
}}, layout_r4_dim0major_);
auto c128 = LiteralUtil::CreateR4WithLayout<complex128>({{
{{10.0, 0.0, 12.0, 0.0}, {0.0, 15.0, 0.0, 17.0}},
{{0.0, 19.0, 0.0, 21.0}, {22.0, 0.0, 24.0, 0.0}},
{{26.0, 0.0, 28.0, 0.0}, {0.0, 31.0, 0.0, 33.0}},
}}, layout_r4_dim0major_);
Literal conv;
conv = s8.Convert(U16).value();
EXPECT_EQ(conv, u16);
conv = s8.Convert(S16).value();
EXPECT_EQ(conv, s16);
conv = s8.Convert(U32).value();
EXPECT_EQ(conv, u32);
conv = s8.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = s8.Convert(U64).value();
EXPECT_EQ(conv, u64);
conv = s8.Convert(S64).value();
EXPECT_EQ(conv, s64);
conv = s8.Convert(PRED).value();
EXPECT_EQ(conv, pred);
conv = bf16.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = bf16.Convert(F32).value();
EXPECT_EQ(conv, f32);
conv = pred.Convert(S32).value();
EXPECT_EQ(conv, int32_pred);
conv = s4nums.Convert(S32).value();
EXPECT_EQ(conv, int32_s4nums);
conv = f32.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = f64.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = s32.Convert(F32).value();
EXPECT_EQ(conv, f32);
conv = f32.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = f64.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = s32.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = u32.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = s32.Convert(C64).value();
EXPECT_EQ(conv, c64);
conv = f16.Convert(C64).value();
EXPECT_EQ(conv, c64);
conv = s32.Convert(S16).value();
EXPECT_EQ(conv, s16);
conv = s32.Convert(U16).value();
EXPECT_EQ(conv, u16);
conv = s32.Convert(C128).value();
EXPECT_EQ(conv, c128);
conv = f16.Convert(C128).value();
EXPECT_EQ(conv, c128);
EXPECT_EQ(s32.Convert(TUPLE).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c64.Convert(F32).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c64.Convert(S32).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c128.Convert(F32).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c128.Convert(S32).status().code(), tsl::error::UNIMPLEMENTED);
}
TYPED_TEST(LiteralUtilFloatTest, ConvertIfTypesMatchF8) {
constexpr auto ptype = primitive_util::NativeToPrimitiveType<TypeParam>();
if (!primitive_util::IsF8Type(ptype)) {
GTEST_SKIP() << "Skipping test for non F8 types";
}
auto s8 = LiteralUtil::CreateR2WithLayout<int8_t>(
{{0, 1}, {2, 3}}, LiteralUtilTest::layout_r2_dim0major_);
auto bf16 = LiteralUtil::CreateR2WithLayout<bfloat16>(
{{bfloat16(0.), bfloat16(1.)}, {bfloat16(2.), bfloat16(3.)}},
LiteralUtilTest::layout_r2_dim0major_);
auto f32 = LiteralUtil::CreateR2WithLayout<float>(
{{0., 1.}, {2., 3.}}, LiteralUtilTest::layout_r2_dim0major_);
auto c128 = LiteralUtil::CreateR2WithLayout<complex128>(
{{0., 1.}, {2., 3.}}, LiteralUtilTest::layout_r2_dim0major_);
using f8e5m2_t = tsl::float8_e5m2;
auto f8e5m2 = LiteralUtil::CreateR2WithLayout<f8e5m2_t>(
{{f8e5m2_t{0.}, f8e5m2_t{1.}}, {f8e5m2_t{2.}, f8e5m2_t{3.}}},
LiteralUtilTest::layout_r2_dim0major_);
using e4m3fn_t = tsl::float8_e4m3fn;
auto f8e4m3fn = LiteralUtil::CreateR2WithLayout<e4m3fn_t>(
{{e4m3fn_t{0.}, e4m3fn_t{1.}}, {e4m3fn_t{2.}, e4m3fn_t{3.}}},
LiteralUtilTest::layout_r2_dim0major_);
auto f8 = LiteralUtil::CreateR2WithLayout<TypeParam>(
{{TypeParam{0.}, TypeParam{1.}}, {TypeParam{2.}, TypeParam{3.}}},
LiteralUtilTest::layout_r2_dim0major_);
Literal conv;
conv = s8.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = bf16.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f32.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f8e5m2.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f8e4m3fn.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f8.Convert(S8).value();
EXPECT_EQ(conv, s8);
conv = f8.Convert(BF16).value();
EXPECT_EQ(conv, bf16);
conv = f8.Convert(F32).value();
EXPECT_EQ(conv, f32);
conv = f8.Convert(C128).value();
EXPECT_EQ(conv, c128);
conv = f8.Convert(F8E5M2).value();
EXPECT_EQ(conv, f8e5m2);
conv = f8.Convert(F8E4M3FN).value();
EXPECT_EQ(conv, f8e4m3fn);
}
TEST_F(LiteralUtilTest, BitcastConvert) {
Literal original = LiteralUtil::CreateR1<uint32_t>(
{absl::bit_cast<uint32_t>(2.5f), absl::bit_cast<uint32_t>(-42.25f),
absl::bit_cast<uint32_t>(100.f), 0xbeef});
Literal expected = LiteralUtil::CreateR1<float>(
{2.5f, -42.25f, 100.0f, absl::bit_cast<float>(0xbeef)});
  TF_ASSERT_OK_AND_ASSIGN(Literal converted,
                          original.BitcastConvert(ShapeUtil::ChangeElementType(
                              original.shape(), F32)));
  // The conversion must reinterpret the u32 bit patterns as f32 values.
  EXPECT_EQ(expected, converted);
}
TEST_F(LiteralUtilTest, BitcastConvertBetweenInvalidTypes) {
Literal literal = LiteralUtil::CreateR0<uint32_t>(1234);
absl::Status status =
literal.BitcastConvert(ShapeUtil::ChangeElementType(literal.shape(), F64))
.status();
EXPECT_NE(absl::OkStatus(), status);
EXPECT_TRUE(
absl::StrContains(status.message(), "to a shape of different size"));
}
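// Installs a default descending minor_to_major (dim 0 major) layout on the
// proto, matching the layout Literal assumes for array shapes.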
void SetDefaultLayoutOnProto(ShapeProto* shape_proto) {
CHECK(ShapeUtil::IsArrayPrimitiveType(shape_proto->element_type()));
auto* minor_to_major =
shape_proto->mutable_layout()->mutable_minor_to_major();
minor_to_major->Resize(shape_proto->dimensions_size(), 0);
const int64_t size = minor_to_major->size();
for (int64_t i = 0; i < size; ++i) {
minor_to_major->Set(i, size - 1 - i);
}
}
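// Deserializes PRED literals of lengths 0 through 24 with alternating values.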
TEST_F(LiteralUtilTest, CopyFromProto_Bool) {
LiteralProto p;
p.mutable_shape()->set_element_type(PRED);
for (int len = 0; len < 25; ++len) {
p.mutable_shape()->clear_dimensions();
p.mutable_shape()->add_dimensions(len);
SetDefaultLayoutOnProto(p.mutable_shape());
p.clear_preds();
for (int i = 0; i < len; ++i) {
p.add_preds((i % 2) == (len % 2));
}
TF_ASSERT_OK_AND_ASSIGN(Literal literal, Literal::CreateFromProto(p));
ASSERT_EQ(len, literal.data<bool>().size());
int i = 0;
for (bool value : literal.data<bool>()) {
EXPECT_EQ((i % 2) == (len % 2), value);
++i;
}
}
}
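// F16 data is serialized into the proto as raw little-endian byte pairs.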
TEST_F(LiteralUtilTest, ToProto_f16) {
half h1(1.0f);
half h2(2.0f);
auto m = LiteralUtil::CreateR2<half>({{h1, h2}, {h2, h1}});
EXPECT_EQ(4, ShapeUtil::ElementsIn(m.shape()));
EXPECT_EQ(4, m.data<half>().size());
LiteralProto p = m.ToProto();
EXPECT_EQ(4, ShapeUtil::ElementsIn(Shape(p.shape())));
EXPECT_EQ(8, p.f16s().size());
const char* d = p.f16s().data();
EXPECT_EQ(d[0], 0);
EXPECT_EQ(d[1], 0x3C);
EXPECT_EQ(d[2], 0);
EXPECT_EQ(d[3], 0x40);
EXPECT_EQ(d[4], 0);
EXPECT_EQ(d[5], 0x40);
EXPECT_EQ(d[6], 0);
EXPECT_EQ(d[7], 0x3C);
}
TEST_F(LiteralUtilTest, CopyFromProto_f16) {
half h1(1.0f);
half h2(2.0f);
const char half_vals[8] = {0x00, 0x3C, 0x00, 0x40, 0x00, 0x40, 0x00, 0x3C};
LiteralProto p;
p.mutable_shape()->set_element_type(F16);
p.mutable_shape()->clear_dimensions();
p.mutable_shape()->add_dimensions(4);
SetDefaultLayoutOnProto(p.mutable_shape());
p.clear_f16s();
p.set_f16s(half_vals, 8);
TF_ASSERT_OK_AND_ASSIGN(Literal literal, Literal::CreateFromProto(p));
auto r = literal.data<half>();
ASSERT_EQ(4, r.size());
EXPECT_EQ(h1, r[0]);
EXPECT_EQ(h2, r[1]);
EXPECT_EQ(h2, r[2]);
EXPECT_EQ(h1, r[3]);
}
TEST_F(LiteralUtilTest, CopyFromProto_u16) {
uint16_t u1(0xabcd);
uint16_t u2(0x1234);
const unsigned char uint16_vals[8] = {0xcd, 0xab, 0x34, 0x12,
0x34, 0x12, 0xcd, 0xab};
LiteralProto p;
p.mutable_shape()->set_element_type(U16);
p.mutable_shape()->clear_dimensions();
p.mutable_shape()->add_dimensions(4);
SetDefaultLayoutOnProto(p.mutable_shape());
p.clear_u16s();
p.set_u16s(uint16_vals, 8);
TF_ASSERT_OK_AND_ASSIGN(Literal literal, Literal::CreateFromProto(p));
auto r = literal.data<uint16_t>();
ASSERT_EQ(4, r.size());
EXPECT_EQ(u1, r[0]);
EXPECT_EQ(u2, r[1]);
EXPECT_EQ(u2, r[2]);
EXPECT_EQ(u1, r[3]);
}
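// LiteralSlice is a non-owning view: an empty shape index denotes the whole
// literal, and deeper indices select nested tuple elements.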
TEST_F(LiteralUtilTest, LiteralDynamicSliceTest) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto nested_tuple = LiteralUtil::MakeTuple({&tuple, &scalar});
Literal nil(ShapeUtil::MakeNil());
EXPECT_EQ(LiteralSlice(scalar, {}), scalar);
EXPECT_EQ(LiteralSlice(matrix, {}), matrix);
EXPECT_EQ(LiteralSlice(tuple, {}), tuple);
EXPECT_EQ(LiteralSlice(nested_tuple, {}), nested_tuple);
EXPECT_EQ(LiteralSlice(nil, {}), nil);
EXPECT_EQ(LiteralSlice(tuple, {0}), scalar);
EXPECT_EQ(LiteralSlice(tuple, {1}), matrix);
EXPECT_EQ(LiteralSlice(nested_tuple, {0}), tuple);
EXPECT_EQ(LiteralSlice(nested_tuple, {0, 0}), scalar);
EXPECT_EQ(LiteralSlice(nested_tuple, {0, 1}), matrix);
EXPECT_EQ(LiteralSlice(nested_tuple, {1}), scalar);
}
TEST_F(LiteralUtilTest, MutatingLiteralSlice) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto nested_tuple = LiteralUtil::MakeTuple({&tuple, &scalar});
const auto nested_tuple_view = LiteralSlice(nested_tuple);
  EXPECT_EQ(nested_tuple.Get<float>({}, /*shape_index=*/{0, 0}), 1.0f);
  EXPECT_EQ(nested_tuple_view.Get<float>({}, /*shape_index=*/{0, 0}), 1.0f);
  // Mutate the underlying literal; the non-owning view must see the change.
  nested_tuple.Set<float>({}, /*shape_index=*/{0, 0}, 555.0f);
  EXPECT_EQ(nested_tuple.Get<float>({}, /*shape_index=*/{0, 0}), 555.0f);
  EXPECT_EQ(nested_tuple_view.Get<float>({}, /*shape_index=*/{0, 0}), 555.0f);
}
TEST_F(LiteralUtilTest, LiteralSliceOfALiteralSlice) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto nested_tuple = LiteralUtil::MakeTuple({&tuple, &scalar});
const auto nested_tuple_view = LiteralSlice(nested_tuple);
const auto tuple_view = LiteralSlice(nested_tuple_view, {0});
const auto matrix_view = LiteralSlice(tuple_view, {1});
EXPECT_EQ(matrix_view,
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
}
TEST_F(LiteralUtilTest, BorrowingLiteralFromOneBufferPtr) {
std::vector<int64_t> int64_values = {1, 2, 3};
const Shape literal_shape = ShapeUtil::MakeShape(S64, {3});
BorrowingLiteral literal(reinterpret_cast<const char*>(int64_values.data()),
literal_shape);
EXPECT_EQ(literal.Get<int64_t>({0}), 1);
EXPECT_EQ(literal.Get<int64_t>({1}), 2);
EXPECT_EQ(literal.Get<int64_t>({2}), 3);
}
TEST_F(LiteralUtilTest, BorrowingLiteralFromMultipleBufferPtrs) {
std::vector<int64_t> one_two_three = {1, 2, 3};
const Shape one_two_three_shape = ShapeUtil::MakeShape(S64, {3});
std::vector<int64_t> hundred = {100};
const Shape hundred_shape = ShapeUtil::MakeShape(S64, {1});
std::vector<const char*> src_buf_ptrs;
src_buf_ptrs.emplace_back(
reinterpret_cast<const char*>(one_two_three.data()));
src_buf_ptrs.emplace_back(reinterpret_cast<const char*>(hundred.data()));
auto literal_tuple = BorrowingLiteral(
src_buf_ptrs,
ShapeUtil::MakeTupleShape({one_two_three_shape, hundred_shape}));
  EXPECT_EQ(literal_tuple.Get<int64_t>({0}, /*shape_index=*/{0}), 1);
  EXPECT_EQ(literal_tuple.Get<int64_t>({0}, /*shape_index=*/{1}), 100);
  EXPECT_EQ(literal_tuple.Get<int64_t>({1}, /*shape_index=*/{0}), 2);
  EXPECT_EQ(literal_tuple.Get<int64_t>({2}, /*shape_index=*/{0}), 3);
}
TEST_F(LiteralUtilTest, BorrowingLiteralFromShapeTree) {
std::vector<float> data = {1.0, 2.0, 3.0};
Shape shape = ShapeUtil::MakeShape(PrimitiveType::F32, {3});
Shape tuple = ShapeUtil::MakeTupleShape({shape, shape});
Shape nested_tuple = ShapeUtil::MakeTupleShape({tuple, shape});
ShapeTree<const char*> ptr_tree(nested_tuple);
*ptr_tree.mutable_element({0, 0}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({0, 1}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({1}) = reinterpret_cast<char*>(data.data());
BorrowingLiteral literal(ptr_tree);
EXPECT_THAT(literal.data<float>({0, 0}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({0, 1}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({1}), ElementsAre(1.0, 2.0, 3.0));
}
TEST_F(LiteralUtilTest, MutableBorrowingLiteralFromShapeTree) {
std::vector<float> data = {1.0, 2.0, 3.0};
Shape shape = ShapeUtil::MakeShape(PrimitiveType::F32, {3});
Shape tuple = ShapeUtil::MakeTupleShape({shape, shape});
Shape nested_tuple = ShapeUtil::MakeTupleShape({tuple, shape});
ShapeTree<char*> ptr_tree(nested_tuple);
*ptr_tree.mutable_element({0, 0}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({0, 1}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({1}) = reinterpret_cast<char*>(data.data());
MutableBorrowingLiteral literal(ptr_tree);
EXPECT_THAT(literal.data<float>({0, 0}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({0, 1}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({1}), ElementsAre(1.0, 2.0, 3.0));
}
TEST_F(LiteralUtilTest, LiteralMove) {
Literal matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
Literal literal(std::move(matrix));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {2, 2}), literal.shape()));
EXPECT_EQ(literal.Get<float>({0, 0}), 1.0);
EXPECT_EQ(literal.Get<float>({0, 1}), 2.0);
EXPECT_EQ(literal.Get<float>({1, 0}), 3.0);
EXPECT_EQ(literal.Get<float>({1, 1}), 4.0);
}
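// DecomposeTuple moves the elements out of a tuple literal, leaving the
// original behind as an empty tuple.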
TEST_F(LiteralUtilTest, DecomposeTuple) {
Literal nil_literal(ShapeUtil::MakeNil());
Literal inner_elements[] = {
LiteralUtil::CreateR0<int32_t>(42),
LiteralUtil::CreateR1<double>({23.0, 44.0}),
};
Literal tuple_elements[] = {
LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}}),
LiteralUtil::MakeTuple(
{&inner_elements[0], &inner_elements[1], &nil_literal}),
};
Literal nested_tuple = LiteralUtil::MakeTuple(
{&tuple_elements[0], &tuple_elements[1], &nil_literal});
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(nested_tuple.shape()));
std::vector<Literal> elements = nested_tuple.DecomposeTuple();
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(nested_tuple.shape()));
ASSERT_EQ(elements.size(), 3);
EXPECT_TRUE(ShapeUtil::Compatible(elements[0].shape(),
ShapeUtil::MakeShape(S32, {2, 2})));
EXPECT_EQ(elements[0].Get<int32_t>({0, 0}), 1);
EXPECT_EQ(elements[0].Get<int32_t>({0, 1}), 2);
EXPECT_EQ(elements[0].Get<int32_t>({1, 0}), 3);
EXPECT_EQ(elements[0].Get<int32_t>({1, 1}), 4);
EXPECT_TRUE(ShapeUtil::Compatible(
elements[1].shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F64, {2}),
ShapeUtil::MakeNil()})));
EXPECT_EQ(elements[1].Get<int32_t>({}, {0}), 42);
EXPECT_EQ(elements[1].Get<double>({0}, {1}), 23.0);
EXPECT_EQ(elements[1].Get<double>({1}, {1}), 44.0);
EXPECT_TRUE(ShapeUtil::Compatible(elements[2].shape(), ShapeUtil::MakeNil()));
}
TEST_F(LiteralUtilTest, DecomposeEmptyTuple) {
Literal nil_literal(ShapeUtil::MakeNil());
std::vector<Literal> elements = nil_literal.DecomposeTuple();
EXPECT_EQ(elements.size(), 0);
}
TEST_F(LiteralUtilTest, MoveIntoTuple) {
std::vector<Literal> elements;
elements.push_back(LiteralUtil::CreateR0<float>(1.0));
elements.push_back(LiteralUtil::CreateR1<int32_t>({4, 8}));
std::vector<Literal> inner_elements;
inner_elements.push_back(LiteralUtil::CreateR0<int32_t>(42));
inner_elements.push_back(LiteralUtil::CreateR1<double>({23.0, 44.0}));
elements.push_back(
LiteralUtil::MakeTuple({&inner_elements[0], &inner_elements[1]}));
Literal literal = Literal::MoveIntoTuple(absl::MakeSpan(elements));
ASSERT_TRUE(literal.shape().IsTuple());
ASSERT_EQ(ShapeUtil::TupleElementCount(literal.shape()), 3);
EXPECT_EQ(literal.Get<float>({}, {0}), 1.0);
EXPECT_EQ(literal.Get<int32_t>({0}, {1}), 4);
EXPECT_EQ(literal.Get<int32_t>({1}, {1}), 8);
EXPECT_EQ(literal.Get<int32_t>({}, {2, 0}), 42);
EXPECT_EQ(literal.Get<double>({0}, {2, 1}), 23.0);
EXPECT_EQ(literal.Get<double>({1}, {2, 1}), 44.0);
for (const Literal& element : elements) {
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(element.shape()));
}
}
TEST_F(LiteralUtilTest, MoveIntoEmptyTuple) {
Literal literal = Literal::MoveIntoTuple({});
ASSERT_TRUE(literal.shape().IsTuple());
EXPECT_EQ(ShapeUtil::TupleElementCount(literal.shape()), 0);
}
TEST_F(LiteralUtilTest, LiteralMoveAssignment) {
Literal literal;
EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeNil(), literal.shape()));
Literal matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
literal = std::move(matrix);
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {2, 2}), literal.shape()));
EXPECT_EQ(literal.Get<float>({0, 0}), 1.0);
EXPECT_EQ(literal.Get<float>({0, 1}), 2.0);
EXPECT_EQ(literal.Get<float>({1, 0}), 3.0);
EXPECT_EQ(literal.Get<float>({1, 1}), 4.0);
}
TEST_F(LiteralUtilTest, LiteralSliceCopy) {
Literal matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
const auto matrix_view = LiteralSlice(matrix);
LiteralSlice matrix_view_copy(matrix_view);
EXPECT_EQ(matrix_view_copy.Get<float>({0, 0}), 1.0);
EXPECT_EQ(matrix_view_copy.Get<float>({0, 1}), 2.0);
EXPECT_EQ(matrix_view_copy.Get<float>({1, 0}), 3.0);
EXPECT_EQ(matrix_view_copy.Get<float>({1, 1}), 4.0);
}
TEST_F(LiteralUtilTest, GetSetTuple) {
Literal elements[] = {
LiteralUtil::CreateR0<float>(42.0),
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}),
};
auto tuple = LiteralUtil::MakeTuple({&elements[0], &elements[1]});
EXPECT_EQ(tuple.Get<float>({}, {0}), 42.0);
tuple.Set<float>({}, {0}, -5.0);
EXPECT_EQ(tuple.Get<float>({}, {0}), -5.0);
EXPECT_EQ(tuple.Get<float>({1, 0}, {1}), 3.0);
tuple.Set<float>({1, 0}, {1}, -4.0);
  EXPECT_EQ(tuple.Get<float>({1, 0}, {1}), -4.0);
}
TEST_F(LiteralUtilTest, CreateFromShapeZeroInitialized) {
Literal scalar_f32 = Literal::CreateFromShape(ShapeUtil::MakeShape(F32, {}));
EXPECT_EQ(scalar_f32.Get<float>({}), 0.0);
EXPECT_TRUE(scalar_f32.IsAll(0));
Literal vector_s32 = Literal::CreateFromShape(ShapeUtil::MakeShape(S32, {3}));
EXPECT_EQ(vector_s32.Get<int32_t>({0}), 0);
EXPECT_EQ(vector_s32.Get<int32_t>({1}), 0);
EXPECT_EQ(vector_s32.Get<int32_t>({2}), 0);
EXPECT_TRUE(vector_s32.IsAll(0));
Literal tuple = Literal::CreateFromShape(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F64, {}), ShapeUtil::MakeShape(PRED, {2}),
ShapeUtil::MakeShape(U64, {2, 1}), ShapeUtil::MakeShape(C64, {}),
ShapeUtil::MakeShape(C128, {})}));
EXPECT_EQ(tuple.Get<double>({}, {0}), 0.0);
EXPECT_EQ(tuple.Get<bool>({0}, {1}), false);
EXPECT_EQ(tuple.Get<bool>({1}, {1}), false);
EXPECT_EQ(tuple.Get<uint64_t>({0, 0}, {2}), 0);
EXPECT_EQ(tuple.Get<uint64_t>({1, 0}, {2}), 0);
EXPECT_EQ(tuple.Get<complex64>({}, {3}), complex64(0.0f, 0.0f));
EXPECT_EQ(tuple.Get<complex128>({}, {4}), complex128(0.0, 0.0));
}
TEST_F(LiteralUtilTest, ProtoRoundTrip) {
auto one_f32 = LiteralUtil::CreateR0<float>(1.0);
auto two_f32 = LiteralUtil::CreateR0<float>(2.0);
auto vector_int8 = LiteralUtil::CreateR1<int8_t>({-128, 0, 2, 4, 7, 56, 127});
auto vector_uint8 = LiteralUtil::CreateR1<uint8_t>({128, 0, 2, 56, 127, 255});
auto vector_c64 = LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_c128 =
LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_bfloat16 = LiteralUtil::CreateR1<bfloat16>(
{bfloat16{-1.0}, bfloat16{2.0}, bfloat16{-3.0}});
auto vector_half =
LiteralUtil::CreateR1<half>({half{10.0}, half{20.0}, half{-30.0}});
using e5 = tsl::float8_e5m2;
auto vector_f8e5m2 =
LiteralUtil::CreateR1<e5>({e5{10.0}, e5{20.0}, e5{-32.0}});
using e4 = tsl::float8_e4m3;
auto vector_f8e4m3 =
LiteralUtil::CreateR1<e4>({e4{10.0}, e4{20.0}, e4{-32.0}});
using e4fn = tsl::float8_e4m3fn;
auto vector_f8e4m3fn =
LiteralUtil::CreateR1<e4fn>({e4fn{10.0}, e4fn{20.0}, e4fn{-32.0}});
using b11 = tsl::float8_e4m3b11fnuz;
auto vector_f8e4m3b11 =
LiteralUtil::CreateR1<b11>({b11{10.0}, b11{20.0}, b11{-30.0}});
using e5f = tsl::float8_e5m2fnuz;
auto vector_f8e5m2fnuz =
LiteralUtil::CreateR1<e5f>({e5f{10.0}, e5f{20.0}, e5f{-30.0}});
using e4f = tsl::float8_e4m3fnuz;
auto vector_f8e4m3fnuz =
LiteralUtil::CreateR1<e4f>({e4f{10.0}, e4f{20.0}, e4f{-30.0}});
using e3 = tsl::float8_e3m4;
auto vector_f8e3m4 = LiteralUtil::CreateR1<e3>({e3{2.5}, e3{5.0}, e3{-8.0}});
auto matrix_pred =
LiteralUtil::CreateR2<bool>({{true, false, true}, {false, false, true}});
auto vector_s4 = LiteralUtil::CreateR1<s4>({s4{-1}, s4{3}, s4{7}});
auto vector_u4 = LiteralUtil::CreateR1<u4>({u4{1}, u4{3}, u4{15}});
auto tuple = LiteralUtil::MakeTuple(
{&one_f32, &vector_half, &matrix_pred, &matrix_pred});
Literal nil_literal(ShapeUtil::MakeNil());
auto nested_tuple =
LiteralUtil::MakeTuple({&tuple, &vector_bfloat16, &tuple, &nil_literal});
auto to_from_proto = [](const Literal& literal) -> Literal {
return Literal::CreateFromProto(literal.ToProto()).value();
};
EXPECT_EQ(one_f32, to_from_proto(one_f32));
EXPECT_EQ(vector_int8, to_from_proto(vector_int8));
EXPECT_EQ(vector_uint8, to_from_proto(vector_uint8));
EXPECT_EQ(vector_c64, to_from_proto(vector_c64));
EXPECT_EQ(vector_c128, to_from_proto(vector_c128));
EXPECT_EQ(vector_bfloat16, to_from_proto(vector_bfloat16));
EXPECT_EQ(vector_f8e5m2, to_from_proto(vector_f8e5m2));
EXPECT_EQ(vector_f8e4m3, to_from_proto(vector_f8e4m3));
EXPECT_EQ(vector_f8e4m3fn, to_from_proto(vector_f8e4m3fn));
EXPECT_EQ(vector_f8e4m3b11, to_from_proto(vector_f8e4m3b11));
EXPECT_EQ(vector_f8e5m2fnuz, to_from_proto(vector_f8e5m2fnuz));
EXPECT_EQ(vector_f8e4m3fnuz, to_from_proto(vector_f8e4m3fnuz));
EXPECT_EQ(vector_f8e3m4, to_from_proto(vector_f8e3m4));
EXPECT_EQ(matrix_pred, to_from_proto(matrix_pred));
EXPECT_EQ(vector_s4, to_from_proto(vector_s4));
EXPECT_EQ(vector_u4, to_from_proto(vector_u4));
EXPECT_EQ(tuple, to_from_proto(tuple));
EXPECT_EQ(nested_tuple, to_from_proto(nested_tuple));
EXPECT_EQ(nil_literal, to_from_proto(nil_literal));
EXPECT_NE(one_f32, two_f32);
EXPECT_NE(one_f32, to_from_proto(two_f32));
}
TEST_F(LiteralUtilTest, InvalidProtoNoValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {3}).ToProto();
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 3 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, ValidProtoNoValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {3}).ToProto();
  absl::Status status =
      Literal::CreateFromProto(proto, /*prohibit_empty_literal=*/false)
          .status();
EXPECT_TRUE(status.ok());
}
TEST_F(LiteralUtilTest, ValidProtoWithClearedValues) {
auto literal = LiteralUtil::CreateR1<bool>({true, false, true});
LiteralProto proto = literal.ToProto();
EXPECT_EQ(proto.preds_size(), 3);
proto.clear_preds();
EXPECT_EQ(proto.preds_size(), 0);
  absl::Status status =
      Literal::CreateFromProto(proto, /*prohibit_empty_literal=*/false)
          .status();
EXPECT_TRUE(status.ok());
}
TEST_F(LiteralUtilTest, InvalidProtoNoShape) {
LiteralProto proto;
proto.add_preds(false);
proto.add_preds(true);
proto.add_preds(false);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("LiteralProto has no shape"));
}
TEST_F(LiteralUtilTest, InvalidProtoWrongContainer) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {3}).ToProto();
proto.add_preds(false);
proto.add_preds(true);
proto.add_preds(false);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 3 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooFewValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {42, 2}).ToProto();
proto.add_f32s(1.0);
proto.add_f32s(2.0);
proto.add_f32s(3.0);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 84 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooManyValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(S32, {2}).ToProto();
proto.add_s32s(42);
proto.add_s32s(-10);
proto.add_s32s(100);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 2 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, InvalidProtoMissingLayout) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(PRED, {2, 2}).ToProto();
proto.mutable_shape()->clear_layout();
proto.add_preds(true);
proto.add_preds(false);
proto.add_preds(true);
proto.add_preds(false);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("LiteralProto has no layout"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooFewTupleElements) {
LiteralProto proto;
*proto.mutable_shape() =
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {2}), ShapeUtil::MakeShape(F32, {})})
.ToProto();
LiteralProto* element0 = proto.add_tuple_literals();
*element0->mutable_shape() =
ShapeUtil::GetTupleElementShape(Shape(proto.shape()), 0).ToProto();
element0->add_preds(false);
element0->add_preds(true);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected 2 tuple elements"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooManyTupleElements) {
LiteralProto proto;
*proto.mutable_shape() =
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {2}), ShapeUtil::MakeShape(F32, {})})
.ToProto();
LiteralProto* element0 = proto.add_tuple_literals();
*element0->mutable_shape() =
ShapeUtil::GetTupleElementShape(Shape(proto.shape()), 0).ToProto();
element0->add_preds(false);
element0->add_preds(true);
LiteralProto* element1 = proto.add_tuple_literals();
*element1->mutable_shape() =
ShapeUtil::GetTupleElementShape(Shape(proto.shape()), 1).ToProto();
element1->add_f32s(42.0);
LiteralProto* element2 = proto.add_tuple_literals();
*element2->mutable_shape() = ShapeUtil::MakeShape(F32, {}).ToProto();
element2->add_f32s(123.0);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected 2 tuple elements"));
}
TEST_F(LiteralUtilTest, BroadcastVectorToMatrix0) {
Literal literal = LiteralUtil::CreateR1<int64_t>({1, 2});
  TF_ASSERT_OK_AND_ASSIGN(
      Literal broadcasted_literal,
      literal.Broadcast(ShapeUtil::MakeShape(S64, {2, 2}), /*dimensions=*/{0}));
EXPECT_EQ(broadcasted_literal,
LiteralUtil::CreateR2<int64_t>({{1, 1}, {2, 2}}));
}
TEST_F(LiteralUtilTest, BroadcastVectorToMatrix1) {
Literal literal = LiteralUtil::CreateR1<int64_t>({1, 2});
  TF_ASSERT_OK_AND_ASSIGN(
      Literal broadcasted_literal,
      literal.Broadcast(ShapeUtil::MakeShape(S64, {2, 2}), /*dimensions=*/{1}));
EXPECT_EQ(broadcasted_literal,
LiteralUtil::CreateR2<int64_t>({{1, 2}, {1, 2}}));
}
TEST_F(LiteralUtilTest, BroadcastScalarToMatrix) {
Literal literal = LiteralUtil::CreateR0<int32_t>(9);
  TF_ASSERT_OK_AND_ASSIGN(
      Literal broadcasted_literal,
      literal.Broadcast(ShapeUtil::MakeShape(S32, {2, 2}), /*dimensions=*/{}));
EXPECT_EQ(broadcasted_literal,
LiteralUtil::CreateR2<int32_t>({{9, 9}, {9, 9}}));
}
TEST_F(LiteralUtilTest, DynamicBroadcast) {
Literal literal = LiteralUtil::CreateR1<int64_t>({1, 2});
literal.SetDynamicSize(0, 1);
  TF_ASSERT_OK_AND_ASSIGN(
      Literal broadcasted_literal,
      literal.Broadcast(ShapeUtil::MakeShape(S64, {2, 2}), /*dimensions=*/{1}));
EXPECT_EQ(broadcasted_literal, LiteralUtil::CreateR2<int64_t>({{1}, {1}}));
EXPECT_EQ(broadcasted_literal.GetDynamicSize(1), 1);
}
TEST_F(LiteralUtilTest, GetAsScalarInt64) {
auto scalar1 = LiteralUtil::CreateR0<int32_t>(12);
EXPECT_EQ(LiteralUtil::LiteralAsScalarInt64(scalar1).value(), (int64_t)12);
auto scalar2 = LiteralUtil::CreateR0<int8_t>(12);
EXPECT_EQ(LiteralUtil::LiteralAsScalarInt64(scalar2).value(), (int64_t)12);
auto non_scalar1 = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}});
EXPECT_FALSE(LiteralUtil::LiteralAsScalarInt64(non_scalar1).has_value());
auto non_scalar2 = LiteralUtil::CreateR1<int32_t>({{1, 2}});
EXPECT_FALSE(LiteralUtil::LiteralAsScalarInt64(non_scalar2).has_value());
}
TEST_F(LiteralUtilTest, GetAsDouble) {
auto m = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(*m.GetAsDouble({0, 0}), 1.0);
EXPECT_EQ(*m.GetAsDouble({1, 0}), 3.0);
}
TEST_F(LiteralUtilTest, GetSumAsDouble) {
auto m = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(*m.GetSumAsDouble({0, 3}), 1.0 + 4.0);
EXPECT_EQ(*m.GetSumAsDouble({0, 1, 2, 3}), 1.0 + 2.0 + 3.0 + 4.0);
auto md = LiteralUtil::CreateR2<double>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(*md.GetSumAsDouble({0, 3}), 1.0 + 4.0);
EXPECT_EQ(*md.GetSumAsDouble({0, 1, 2, 3}), 1.0 + 2.0 + 3.0 + 4.0);
std::vector<float> vals(1024, 1.0);
auto v = LiteralUtil::CreateR1<float>(vals);
std::vector<int64_t> indices;
for (int i = 0; i < 1024; i += 2) {
indices.push_back(i);
EXPECT_EQ(*v.GetSumAsDouble(indices), (i + 2) / 2.0);
}
}
TEST_F(LiteralUtilTest, GetAsComplex128) {
complex128 value = {1, 0};
Literal c1 = LiteralUtil::CreateR0<complex128>(value);
EXPECT_EQ(*c1.GetAsComplex128({}), value);
Literal c2 = LiteralUtil::CreateR0<double>(1);
EXPECT_EQ(*c2.GetAsComplex128({}), value);
complex64 float_value = {1, 0};
Literal c4 = LiteralUtil::CreateR0<complex64>(float_value);
EXPECT_EQ(*c4.GetAsComplex128({}), value);
complex128 other_value = {1, 2};
Literal c5 = LiteralUtil::CreateR0<complex128>(other_value);
EXPECT_EQ(*c5.GetAsComplex128({}), other_value);
Literal c6 = LiteralUtil::CreateR0<int64_t>(1);
EXPECT_FALSE(c6.GetAsComplex128({}).has_value());
}
TEST_F(LiteralUtilTest, SliceOnBool) {
Literal c1 = LiteralUtil::CreateR1<bool>({true, true, false});
EXPECT_EQ(c1, c1.Slice({0}, {3}));
}
TEST_F(LiteralUtilTest, IsEqualAt) {
double val_double = 10.0;
int val_integral = 10;
Literal c1 = LiteralUtil::CreateR0<int>(10);
EXPECT_TRUE(c1.IsEqualAt({}, val_double));
EXPECT_TRUE(c1.IsEqualAt({}, val_integral));
Literal c2 = LiteralUtil::CreateR0<double>(10);
EXPECT_TRUE(c2.IsEqualAt({}, val_double));
EXPECT_TRUE(c2.IsEqualAt({}, val_integral));
Literal c3 =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2{val_double});
EXPECT_TRUE(c3.IsEqualAt({}, val_double));
EXPECT_TRUE(c3.IsEqualAt({}, val_integral));
complex128 val_complex = {10, 0};
EXPECT_TRUE(c1.IsEqualAt({}, val_complex));
EXPECT_TRUE(c2.IsEqualAt({}, val_complex));
EXPECT_TRUE(c3.IsEqualAt({}, val_complex));
Literal c4 = LiteralUtil::CreateR0<complex128>(val_complex);
EXPECT_TRUE(c4.IsEqualAt({}, val_double));
EXPECT_TRUE(c4.IsEqualAt({}, val_integral));
EXPECT_TRUE(c4.IsEqualAt({}, val_complex));
EXPECT_FALSE(c4.IsEqualAt({}, std::numeric_limits<double>::infinity()));
complex128 val_true_complex = {10, 3};
complex64 val_smaller_complex = {10, 3};
Literal c5 = LiteralUtil::CreateR0<complex128>(val_true_complex);
EXPECT_TRUE(c5.IsEqualAt({}, val_true_complex));
EXPECT_TRUE(c5.IsEqualAt({}, val_smaller_complex));
Literal c6 = LiteralUtil::CreateR0<tsl::float8_e5m2fnuz>(
tsl::float8_e5m2fnuz{val_double});
EXPECT_TRUE(c6.IsEqualAt({}, val_double));
EXPECT_TRUE(c6.IsEqualAt({}, val_integral));
Literal c7 = LiteralUtil::CreateR0<tsl::float8_e4m3fnuz>(
tsl::float8_e4m3fnuz{val_double});
  EXPECT_TRUE(c7.IsEqualAt({}, val_double));
  EXPECT_TRUE(c7.IsEqualAt({}, val_integral));
Literal c8 =
LiteralUtil::CreateR0<tsl::float8_e4m3>(tsl::float8_e4m3{val_double});
EXPECT_TRUE(c8.IsEqualAt({}, val_double));
EXPECT_TRUE(c8.IsEqualAt({}, val_integral));
Literal c9 =
LiteralUtil::CreateR0<tsl::float8_e4m3fn>(tsl::float8_e4m3fn{val_double});
EXPECT_TRUE(c9.IsEqualAt({}, val_double));
EXPECT_TRUE(c9.IsEqualAt({}, val_integral));
Literal c10 =
LiteralUtil::CreateR0<tsl::float8_e3m4>(tsl::float8_e3m4{val_double});
EXPECT_TRUE(c10.IsEqualAt({}, val_double));
EXPECT_TRUE(c10.IsEqualAt({}, val_integral));
}
TEST_F(LiteralUtilTest, CreateFromShapeWithUnknownLeafArrays) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeShape(F32, {4, 4}));
EXPECT_FALSE(c1.IsKnown());
}
TEST_F(LiteralUtilTest, CreateFromShapeWithUnknownLeafArraysS4Tuple) {
auto inner_shape = ShapeUtil::MakeShape(S4, {4, 4});
inner_shape.mutable_layout()->set_element_size_in_bits(4);
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeTupleShape({inner_shape}));
EXPECT_FALSE(c1.IsKnown());
}
TEST_F(LiteralUtilTest, CreatePartiallyKnownTuple) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeShape(F32, {4, 4}));
Literal c2 = LiteralUtil::CreateR0<int>(10);
Literal c3 = LiteralUtil::MakeTuple({&c1, &c2});
Literal c4 = LiteralUtil::CreateR0<int>(100);
Literal c5 = LiteralUtil::MakeTuple({&c4, &c3});
EXPECT_FALSE(c5.IsKnown());
}
TEST_F(LiteralUtilTest, CopyFromPartiallyKnownTuple) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeShape(F32, {4, 4}));
Literal c2 = LiteralUtil::CreateR0<int>(10);
Literal c3 = LiteralUtil::MakeTuple({&c1, &c2});
Literal c4 = LiteralUtil::CreateR0<int>(100);
Literal c5 = LiteralUtil::MakeTuple({&c4, &c3});
Literal c6 = Literal::CreateFromShape(c5.shape());
  TF_ASSERT_OK(
      c6.CopyFrom(c5, /*dest_shape_index=*/{1}, /*src_shape_index=*/{1}));
EXPECT_FALSE(c6.IsKnown());
}
TEST_F(LiteralUtilTest, CopyFromPartiallyKnownTupleUnknownTupleElement) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4, 4}),
ShapeUtil::MakeShape(F32, {4, 4})}));
Literal c2 = LiteralUtil::CreateR0<int>(10);
Literal c3 = LiteralUtil::MakeTuple({&c1, &c2});
Literal c4 = LiteralUtil::CreateR0<int>(100);
Literal c5 = LiteralUtil::MakeTuple({&c4, &c3});
Literal c6 = Literal::CreateFromShape(c5.shape());
Literal c1_copy = Literal::CreateFromShape(c1.shape());
Literal c2_copy = Literal::CreateFromShape(c2.shape());
  TF_ASSERT_OK(
      c6.CopyFrom(c5, /*dest_shape_index=*/{1}, /*src_shape_index=*/{1}));
  TF_ASSERT_OK(c1_copy.CopyFrom(c6, /*dest_shape_index=*/{},
                                /*src_shape_index=*/{1, 0}));
  TF_ASSERT_OK(c2_copy.CopyFrom(c6, /*dest_shape_index=*/{},
                                /*src_shape_index=*/{1, 1}));
EXPECT_FALSE(c6.IsKnown());
EXPECT_FALSE(c1_copy.IsKnown());
EXPECT_TRUE(c2_copy.IsKnown());
}
TEST_F(LiteralUtilTest, PopulateR1Dynamic) {
auto literal = Literal(ShapeUtil::MakeShape(U32, {20}));
literal.SetDynamicSize(0, 10);
literal.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
std::string expected = "u32[<=20](10) {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2DynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(U32, {5, 2}));
literal.SetDynamicSize(0, 3);
literal.PopulateR2<uint32_t>({{1, 2}, {3, 4}, {5, 6}});
std::string expected = R"(u32[<=5,2](3,2) {
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2DynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(U32, {2, 5}));
literal.SetDynamicSize(1, 3);
literal.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}});
std::string expected = R"(u32[2,<=5](2,3) {
{ 1, 2, 3 },
{ 4, 5, 6 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateFrom1DArray) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {20}));
literal.SetDynamicSize(0, 10);
xla::Array<float> array({10});
for (int i = 0; i < 10; i++) {
array(i) = static_cast<float>(i);
}
literal.PopulateFromArray(array);
std::string expected = "f32[<=20](10) {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateFromArrayDynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 3;
const uint32_t cols = 5;
literal.SetDynamicSize(0, rows);
xla::Array<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateFromArray(array);
std::string expected = R"(f32[<=5,5](3,5) {
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateFromArrayDynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 5;
const uint32_t cols = 3;
literal.SetDynamicSize(1, cols);
xla::Array<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateFromArray(array);
std::string expected = R"(f32[5,<=5](5,3) {
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2FromArray2DDynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 3;
const uint32_t cols = 5;
literal.SetDynamicSize(0, rows);
xla::Array2D<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateR2FromArray2D(array);
std::string expected = R"(f32[<=5,5](3,5) {
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2FromArray2DDynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 5;
const uint32_t cols = 3;
literal.SetDynamicSize(1, cols);
xla::Array2D<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateR2FromArray2D(array);
std::string expected = R"(f32[5,<=5](5,3) {
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2FromArray2DDynamicDim0Dim1) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 3;
const uint32_t cols = 2;
literal.SetDynamicSize(0, rows);
literal.SetDynamicSize(1, cols);
xla::Array2D<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateR2FromArray2D(array);
std::string expected = R"(f32[<=5,<=5](3,2) {
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR3FromArray3DDynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(S32, {3, 3, 3}));
const uint32_t rows = 2;
const uint32_t cols = 3;
const uint32_t depth = 3;
literal.SetDynamicSize(0, rows);
xla::Array3D<int32_t> array({rows, cols, depth});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
for (int k = 0; k < depth; k++) {
array(i, j, k) = static_cast<int32_t>(k);
}
}
}
literal.PopulateR3FromArray3D(array);
std::string expected = R"(s32[<=3,3,3](2,3,3) {
{
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
},
{
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR3FromArray3DDynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(S32, {3, 3, 3}));
const uint32_t rows = 3;
const uint32_t cols = 2;
const uint32_t depth = 3;
literal.SetDynamicSize(1, cols);
xla::Array3D<int32_t> array({rows, cols, depth});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
for (int k = 0; k < depth; k++) {
array(i, j, k) = static_cast<int32_t>(k);
}
}
}
literal.PopulateR3FromArray3D(array);
std::string expected = R"(s32[3,<=3,3](3,2,3) {
{
{ 0, 1, 2 },
{ 0, 1, 2 }
},
{
{ 0, 1, 2 },
{ 0, 1, 2 }
},
{
{ 0, 1, 2 },
{ 0, 1, 2 }
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR3FromArray3DDynamicDim2) {
auto literal = Literal(ShapeUtil::MakeShape(S32, {3, 3, 3}));
const uint32_t rows = 3;
const uint32_t cols = 3;
const uint32_t depth = 2;
literal.SetDynamicSize(2, depth);
xla::Array3D<int32_t> array({rows, cols, depth});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
for (int k = 0; k < depth; k++) {
array(i, j, k) = static_cast<int32_t>(k);
}
}
}
literal.PopulateR3FromArray3D(array);
std::string expected = R"(s32[3,3,<=3](3,3,2) {
{
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
},
{
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
},
{
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, Compare4BitType) {
Literal literal1 = Literal(ShapeUtil::MakeShape(S4, {}));
Literal literal2 = Literal(ShapeUtil::MakeShape(S4, {}));
void* p = literal1.untyped_data();
void* q = literal2.untyped_data();
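  // Write raw packed-nibble bytes: both literals hold 4 in the low (value)
  // nibble, while the high (padding) nibbles differ (0x4 vs. 0xc) and must be
  // ignored by both printing and equality comparison.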
  *reinterpret_cast<uint8_t*>(p) = 0x44;
  *reinterpret_cast<uint8_t*>(q) = 0xc4;
std::string expected = R"(s4[] 4)";
EXPECT_EQ(expected, literal1.ToString());
EXPECT_EQ(literal1.ToString(), literal2.ToString());
EXPECT_EQ(literal1, literal2);
}
class LiteralSerializationTest : public ::testing::Test,
public ::testing::WithParamInterface<Shape> {
public:
static std::vector<Shape> GenerateSimpleParams() {
std::vector<Shape> params;
for (PrimitiveType element_type :
{PRED, S4, U4, S8, U8, S16,
U16, S32, U32, S64, U64, F16,
F32, F64, BF16, F8E5M2, F8E4M3, F8E4M3FN,
F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ, F8E3M4, C64, C128}) {
for (const DimensionVector& dimensions : {
DimensionVector{},
DimensionVector{0},
DimensionVector{1},
DimensionVector{7},
DimensionVector{8},
DimensionVector{9},
DimensionVector{0, 8},
DimensionVector{8, 9},
}) {
params.push_back(ShapeUtil::MakeShape(element_type, dimensions));
}
}
return params;
}
static std::vector<Shape> GenerateTupleParams() {
std::vector<Shape> params;
const Shape tuple_elements[] = {
ShapeUtil::MakeShape(PRED, {}),
ShapeUtil::MakeShape(U4, {3}),
ShapeUtil::MakeShape(U32, {0}),
ShapeUtil::MakeShape(F32, {7}),
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(BF16, {3}),
ShapeUtil::MakeShape(C64, {7}),
}),
};
for (const Shape& lhs : tuple_elements) {
for (const Shape& rhs : tuple_elements) {
params.push_back(ShapeUtil::MakeTupleShape({lhs, rhs}));
}
}
return params;
}
};
TEST_P(LiteralSerializationTest, Test) {
const Shape& shape = GetParam();
LOG(INFO) << "shape: " << shape.ToString();
absl::InsecureBitGen bitgen(std::seed_seq({42}));
Literal literal(shape);
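  // Fill every leaf array with pseudo-random values appropriate to its
  // element type: coin flips for PRED, uniform doubles for floating-point and
  // complex types, and uniform bits for integer types.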
ASSERT_NO_FATAL_FAILURE(ShapeUtil::ForEachSubshape(
shape, [&](const Shape& subshape, const ShapeIndex& shape_index) {
if (subshape.IsTuple()) {
return;
}
ASSERT_TRUE(subshape.IsArray());
primitive_util::ArrayTypeSwitch<void>(
[&](auto primitive_type) {
using NativeT = primitive_util::NativeTypeOf<primitive_type>;
for (auto& element : literal.data<NativeT>(shape_index)) {
if constexpr (std::is_same_v<NativeT, bool>) {
element = absl::Uniform<int>(bitgen, 0, 2);
} else if constexpr (primitive_util::IsComplexType(
primitive_type)) {
element = NativeT(absl::Uniform<double>(bitgen, -1.0, 1.0),
absl::Uniform<double>(bitgen, -1.0, 1.0));
} else if constexpr (primitive_util::IsFloatingPointType(
primitive_type)) {
element = static_cast<NativeT>(
absl::Uniform<double>(bitgen, -1.0, 1.0));
} else {
element =
static_cast<NativeT>(absl::Uniform<uint64_t>(bitgen));
}
}
},
subshape.element_type());
}));
TF_ASSERT_OK_AND_ASSIGN(std::string serialized, literal.SerializeAsString());
TF_ASSERT_OK_AND_ASSIGN(Literal deserialized,
Literal::DeserializeFromString(serialized));
EXPECT_EQ(literal, deserialized);
}
INSTANTIATE_TEST_SUITE_P(
Simple, LiteralSerializationTest,
::testing::ValuesIn(LiteralSerializationTest::GenerateSimpleParams()));
INSTANTIATE_TEST_SUITE_P(
Tuples, LiteralSerializationTest,
::testing::ValuesIn(LiteralSerializationTest::GenerateTupleParams()));
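// Benchmarks broadcasting a d0-element R1 literal along dimension 0 into a
// d0 x d1 matrix.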
void BM_BroadcastVectorToMatrix(::testing::benchmark::State& state) {
const int d0 = state.range(0);
const int d1 = state.range(1);
std::vector<int64_t> v(d0);
for (int i = 0; i < d0; i++) {
v[i] = i;
}
Literal literal = LiteralUtil::CreateR1<int64_t>(v);
int count = 0;
for (auto s : state) {
    TF_ASSERT_OK_AND_ASSIGN(
        Literal broadcasted_literal,
        literal.Broadcast(ShapeUtil::MakeShape(S64, {d0, d1}),
                          /*dimensions=*/{0}));
if (count == 0) {
state.SetLabel(literal.shape().ToString() + " to " +
broadcasted_literal.shape().ToString());
}
count++;
}
}
BENCHMARK(BM_BroadcastVectorToMatrix)
->ArgPair(16, 16)
->ArgPair(16, 1024)
->ArgPair(1024, 1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/literal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/literal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ca36bd5b-bb69-404b-a7e1-93b19435d268 | cpp | abseil/abseil-cpp | sysinfo | absl/base/internal/sysinfo.cc | absl/base/internal/sysinfo_test.cc | #include "absl/base/internal/sysinfo.h"
#include "absl/base/attributes.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#ifdef __linux__
#include <sys/syscall.h>
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif
#ifdef __FreeBSD__
#include <pthread_np.h>
#endif
#ifdef __NetBSD__
#include <lwp.h>
#endif
#if defined(__myriad2__)
#include <rtems.h>
#endif
#if defined(__Fuchsia__)
#include <zircon/process.h>
#endif
#include <string.h>
#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <limits>
#include <thread>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/thread_annotations.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
#if defined(_WIN32)
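// Counts set bits by repeatedly clearing the lowest set bit (Kernighan's
// method).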
DWORD Win32CountSetBits(ULONG_PTR bitMask) {
for (DWORD bitSetCount = 0; ; ++bitSetCount) {
if (bitMask == 0) return bitSetCount;
bitMask &= bitMask - 1;
}
}
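// Counts logical CPUs by walking SYSTEM_LOGICAL_PROCESSOR_INFORMATION
// records. The first GetLogicalProcessorInformation() call is expected to
// fail with ERROR_INSUFFICIENT_BUFFER, which reports the buffer size needed
// for the retry; each RelationProcessorCore record then contributes one
// logical processor per set bit in its ProcessorMask.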
int Win32NumCPUs() {
#pragma comment(lib, "kernel32.lib")
using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
DWORD info_size = sizeof(Info);
Info* info(static_cast<Info*>(malloc(info_size)));
if (info == nullptr) return 0;
bool success = GetLogicalProcessorInformation(info, &info_size);
if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
free(info);
info = static_cast<Info*>(malloc(info_size));
if (info == nullptr) return 0;
success = GetLogicalProcessorInformation(info, &info_size);
}
DWORD logicalProcessorCount = 0;
if (success) {
Info* ptr = info;
DWORD byteOffset = 0;
while (byteOffset + sizeof(Info) <= info_size) {
switch (ptr->Relationship) {
case RelationProcessorCore:
logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask);
break;
case RelationNumaNode:
case RelationCache:
case RelationProcessorPackage:
break;
default:
break;
}
byteOffset += sizeof(Info);
ptr++;
}
}
free(info);
return static_cast<int>(logicalProcessorCount);
}
#endif
}
static int GetNumCPUs() {
#if defined(__myriad2__)
return 1;
#elif defined(_WIN32)
const int hardware_concurrency = Win32NumCPUs();
return hardware_concurrency ? hardware_concurrency : 1;
#elif defined(_AIX)
return sysconf(_SC_NPROCESSORS_ONLN);
#else
return static_cast<int>(std::thread::hardware_concurrency());
#endif
}
#if defined(_WIN32)
static double GetNominalCPUFrequency() {
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
return 1.0;
#else
#pragma comment(lib, "advapi32.lib")
HKEY key;
if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
KEY_READ, &key) == ERROR_SUCCESS) {
DWORD type = 0;
DWORD data = 0;
DWORD data_size = sizeof(data);
auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
reinterpret_cast<LPBYTE>(&data), &data_size);
RegCloseKey(key);
if (result == ERROR_SUCCESS && type == REG_DWORD &&
data_size == sizeof(data)) {
return data * 1e6;
}
}
return 1.0;
#endif
}
#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
static double GetNominalCPUFrequency() {
unsigned freq;
size_t size = sizeof(freq);
int mib[2] = {CTL_HW, HW_CPU_FREQ};
if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
return static_cast<double>(freq);
}
return 1.0;
}
#else
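// Reads a decimal long from `file` (e.g. a sysfs entry) into `*value`.
// Returns false if the file cannot be opened or its contents do not start
// with a well-formed number.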
static bool ReadLongFromFile(const char *file, long *value) {
bool ret = false;
#if defined(_POSIX_C_SOURCE)
const int file_mode = (O_RDONLY | O_CLOEXEC);
#else
const int file_mode = O_RDONLY;
#endif
int fd = open(file, file_mode);
if (fd != -1) {
char line[1024];
char *err;
memset(line, '\0', sizeof(line));
ssize_t len;
do {
len = read(fd, line, sizeof(line) - 1);
} while (len < 0 && errno == EINTR);
if (len <= 0) {
ret = false;
} else {
const long temp_value = strtol(line, &err, 10);
if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
*value = temp_value;
ret = true;
}
}
close(fd);
}
return ret;
}
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
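// Reads the monotonic clock in nanoseconds, preferring CLOCK_MONOTONIC_RAW
// where available so NTP frequency adjustments do not skew the TSC
// calibration below.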
static int64_t ReadMonotonicClockNanos() {
struct timespec t;
#ifdef CLOCK_MONOTONIC_RAW
int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
#else
int rc = clock_gettime(CLOCK_MONOTONIC, &t);
#endif
if (rc != 0) {
ABSL_INTERNAL_LOG(
FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
}
return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
}
class UnscaledCycleClockWrapperForInitializeFrequency {
public:
static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
};
struct TimeTscPair {
int64_t time;
int64_t tsc;
};
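// Samples (monotonic time, TSC) pairs several times and keeps the pair read
// with the smallest clock-read latency, which best approximates a
// simultaneous reading of both clocks.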
static TimeTscPair GetTimeTscPair() {
int64_t best_latency = std::numeric_limits<int64_t>::max();
TimeTscPair best;
for (int i = 0; i < 10; ++i) {
int64_t t0 = ReadMonotonicClockNanos();
int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
int64_t t1 = ReadMonotonicClockNanos();
int64_t latency = t1 - t0;
if (latency < best_latency) {
best_latency = latency;
best.time = t0;
best.tsc = tsc;
}
}
return best;
}
static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
auto t0 = GetTimeTscPair();
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = sleep_nanoseconds;
while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
auto t1 = GetTimeTscPair();
double elapsed_ticks = t1.tsc - t0.tsc;
double elapsed_time = (t1.time - t0.time) * 1e-9;
return elapsed_ticks / elapsed_time;
}
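// Measures the TSC frequency with exponentially growing sleep intervals,
// stopping as soon as two consecutive measurements agree to within 1%.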
static double MeasureTscFrequency() {
double last_measurement = -1.0;
int sleep_nanoseconds = 1000000;
for (int i = 0; i < 8; ++i) {
double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
if (measurement * 0.99 < last_measurement &&
last_measurement < measurement * 1.01) {
return measurement;
}
last_measurement = measurement;
sleep_nanoseconds *= 2;
}
return last_measurement;
}
#endif
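// On Linux, prefer the kernel-exported TSC frequency if present; otherwise
// measure the TSC directly when the cycle clock ticks at CPU frequency, and
// finally fall back to cpufreq's maximum frequency (returning 1.0 when
// nothing is available).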
static double GetNominalCPUFrequency() {
long freq = 0;
if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
return freq * 1e3;
}
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
return MeasureTscFrequency();
#else
if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&freq)) {
return freq * 1e3;
}
return 1.0;
#endif
}
#endif
ABSL_CONST_INIT static once_flag init_num_cpus_once;
ABSL_CONST_INIT static int num_cpus = 0;
int NumCPUs() {
base_internal::LowLevelCallOnce(
&init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
return num_cpus;
}
ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
double NominalCPUFrequency() {
base_internal::LowLevelCallOnce(
&init_nominal_cpu_frequency_once,
[]() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
return nominal_cpu_frequency;
}
#if defined(_WIN32)
pid_t GetTID() {
return pid_t{GetCurrentThreadId()};
}
#elif defined(__linux__)
#ifndef SYS_gettid
#define SYS_gettid __NR_gettid
#endif
pid_t GetTID() {
return static_cast<pid_t>(syscall(SYS_gettid));
}
#elif defined(__akaros__)
pid_t GetTID() {
if (in_vcore_context())
return 0;
return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
}
#elif defined(__myriad2__)
pid_t GetTID() {
uint32_t tid;
rtems_task_ident(RTEMS_SELF, 0, &tid);
return tid;
}
#elif defined(__APPLE__)
pid_t GetTID() {
uint64_t tid;
pthread_threadid_np(nullptr, &tid);
return static_cast<pid_t>(tid);
}
#elif defined(__FreeBSD__)
pid_t GetTID() { return static_cast<pid_t>(pthread_getthreadid_np()); }
#elif defined(__OpenBSD__)
pid_t GetTID() { return getthrid(); }
#elif defined(__NetBSD__)
pid_t GetTID() { return static_cast<pid_t>(_lwp_self()); }
#elif defined(__native_client__)
pid_t GetTID() {
auto* thread = pthread_self();
static_assert(sizeof(pid_t) == sizeof(thread),
"In NaCL int expected to be the same size as a pointer");
return reinterpret_cast<pid_t>(thread);
}
#elif defined(__Fuchsia__)
pid_t GetTID() {
return static_cast<pid_t>(zx_thread_self());
}
#else
pid_t GetTID() {
return static_cast<pid_t>(pthread_self());
}
#endif
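// Caches the thread id in a thread_local where the platform supports it, so
// repeated lookups avoid the GetTID() syscall; otherwise every call falls
// through to GetTID().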
pid_t GetCachedTID() {
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local pid_t thread_id = GetTID();
return thread_id;
#else
return GetTID();
#endif
}
}
ABSL_NAMESPACE_END
} | #include "absl/base/internal/sysinfo.h"
#ifndef _WIN32
#include <sys/types.h>
#include <unistd.h>
#endif
#include <thread>
#include <unordered_set>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
TEST(SysinfoTest, NumCPUs) {
EXPECT_NE(NumCPUs(), 0)
<< "NumCPUs() should not have the default value of 0";
}
TEST(SysinfoTest, GetTID) {
EXPECT_EQ(GetTID(), GetTID());
#ifdef __native_client__
return;
#endif
for (int i = 0; i < 10; ++i) {
constexpr int kNumThreads = 10;
Barrier all_threads_done(kNumThreads);
std::vector<std::thread> threads;
Mutex mutex;
std::unordered_set<pid_t> tids;
for (int j = 0; j < kNumThreads; ++j) {
threads.push_back(std::thread([&]() {
pid_t id = GetTID();
{
MutexLock lock(&mutex);
ASSERT_TRUE(tids.find(id) == tids.end());
tids.insert(id);
}
all_threads_done.Block();
}));
}
for (auto& thread : threads) {
thread.join();
}
}
}
#ifdef __linux__
TEST(SysinfoTest, LinuxGetTID) {
EXPECT_EQ(GetTID(), getpid());
}
#endif
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/sysinfo.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/sysinfo_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
f7c5503c-3601-4d95-bf3c-810508313e4c | cpp | tensorflow/tensorflow | tfr_decompose_ctx | tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.cc | tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx_test.cc | #include "tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_attr.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.h"
#include "tensorflow/compiler/mlir/tfr/passes/passes.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/env_var.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfr {
const char* const kTFRLibEnv = "TF_MLIR_TFR_LIB_DIR";
absl::StatusOr<std::unique_ptr<TFRDecomposeContext>> TFRDecomposeContext::Get(
mlir::MLIRContext* mlir_ctx) {
Env* env = Env::Default();
std::string tfr_lib_dir;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(
kTFRLibEnv, "tensorflow/compiler/mlir/tfr/resources", &tfr_lib_dir));
string composite_mlir_dir = io::JoinPath(env->GetRunfilesDir(), tfr_lib_dir);
std::vector<string> files;
TF_RETURN_IF_ERROR(env->GetChildren(composite_mlir_dir, &files));
if (files.empty()) {
return errors::Internal(absl::StrCat(
"Failed to find the decomposition lib from path ", composite_mlir_dir));
}
std::string tfr_raw_text;
for (const auto& file : files) {
string fullpath = io::JoinPath(composite_mlir_dir, file);
if (env->MatchPath(fullpath, io::JoinPath(composite_mlir_dir, "*.mlir"))) {
std::string text;
TF_RETURN_IF_ERROR(ReadFileToString(env, fullpath, &text));
tfr_raw_text.append(text);
}
}
auto ctx = TFRDecomposeContext::GetFromText(tfr_raw_text, mlir_ctx);
if (!ctx) {
return errors::Internal(absl::StrCat(
"Failed to load the imported decomposition lib: ", tfr_raw_text));
}
return ctx;
}
std::unique_ptr<TFRDecomposeContext> TFRDecomposeContext::GetFromText(
StringPiece tfr_raw_text, mlir::MLIRContext* mlir_ctx) {
mlir_ctx->allowUnregisteredDialects(true);
mlir::DialectRegistry registry;
registry.insert<mlir::arith::ArithDialect,
mlir::func::FuncDialect,
mlir::scf::SCFDialect,
mlir::shape::ShapeDialect,
mlir::TF::TensorFlowDialect,
mlir::tf_device::TensorFlowDeviceDialect,
mlir::tf_executor::TensorFlowExecutorDialect,
mlir::TFR::TFRDialect>();
mlir::func::registerAllExtensions(registry);
mlir_ctx->appendDialectRegistry(registry);
mlir_ctx->loadAllAvailableDialects();
auto memory_buffer = llvm::MemoryBuffer::getMemBuffer(
llvm::StringRef(tfr_raw_text.data(), tfr_raw_text.size()));
llvm::SourceMgr source_mgr;
source_mgr.AddNewSourceBuffer(std::move(memory_buffer), llvm::SMLoc());
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceFile<mlir::ModuleOp>(source_mgr, mlir_ctx);
auto module_op = module.release();
return std::make_unique<TFRDecomposeContext>(module_op);
}
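// Expands a single NodeDef by wrapping it in a one-op MLIR function whose
// unranked tensor operand/result types come from the op registry, running the
// decompose pass pipeline on that module, and exporting the result as a
// FunctionDef.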
absl::StatusOr<FunctionDef> TFRDecomposeContext::ExpandNode(
const NodeDef& node_def, StringPiece func_name) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def));
DataTypeVector input_dtys, output_dtys;
TF_RETURN_IF_ERROR(InputTypesForNode(node_def, *op_def, &input_dtys));
TF_RETURN_IF_ERROR(OutputTypesForNode(node_def, *op_def, &output_dtys));
mlir::MLIRContext* context = tfr_module_.getContext();
llvm::SmallVector<mlir::Type, 4> input_tys, output_tys;
mlir::Builder builder(context);
for (auto ty : input_dtys) {
mlir::Type elt_ty;
TF_RETURN_IF_ERROR(ConvertDataType(ty, builder, &elt_ty));
mlir::TensorType mlir_ty = mlir::UnrankedTensorType::get(elt_ty);
input_tys.push_back(mlir_ty);
}
for (auto ty : output_dtys) {
mlir::Type elt_ty;
TF_RETURN_IF_ERROR(ConvertDataType(ty, builder, &elt_ty));
mlir::TensorType mlir_ty = mlir::UnrankedTensorType::get(elt_ty);
output_tys.push_back(mlir_ty);
}
llvm::SmallVector<mlir::NamedAttribute, 4> attrs;
for (const auto& attr : node_def.attr()) {
TF_ASSIGN_OR_RETURN(auto mlir_attr,
ConvertAttributeValue(attr.second, &builder));
attrs.push_back({mlir::StringAttr::get(context, attr.first), mlir_attr});
}
mlir::Location loc = mlir::UnknownLoc::get(context);
mlir::ModuleOp module = mlir::ModuleOp::create(loc);
mlir::FunctionType func_type =
mlir::FunctionType::get(context, input_tys, output_tys);
llvm::StringRef func_name_str(func_name.data(), func_name.size());
auto func = mlir::func::FuncOp::create(loc, func_name_str, func_type, {});
module.push_back(func);
func.addEntryBlock();
mlir::OpBuilder op_builder(func.getBody());
const std::string tf_op_full_name = absl::StrCat("tf.", node_def.op());
mlir::OperationState op_state(loc, tf_op_full_name);
op_state.addOperands(func.getArguments());
op_state.addTypes(output_tys);
op_state.addAttributes(attrs);
mlir::Operation* tf_op = op_builder.create(op_state);
op_builder.create<mlir::func::ReturnOp>(loc, tf_op->getResults());
TF_RETURN_IF_ERROR(DecomposeGraph(module));
FunctionDef func_def;
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(
func, export_confs_, &func_def));
module.erase();
return func_def;
}
Status TFRDecomposeContext::DecomposeGraph(mlir::ModuleOp user_module) {
if (failed(pm_.run(user_module))) {
return errors::Internal("Failed to run the decompose passes.");
}
return absl::OkStatus();
}
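// The decompose pipeline: functionalize the executor dialect, decompose TF
// ops using the TFR library, raise the results back to TF ops, then convert
// back to executor form and re-split the islands.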
TFRDecomposeContext::TFRDecomposeContext(mlir::ModuleOp tfr_module)
: tfr_module_(tfr_module), pm_(tfr_module_.getContext()) {
mlir::OpPassManager& func_pm = pm_.nest<mlir::func::FuncOp>();
func_pm.addPass(mlir::CreateExecutorDialectToFunctionalConversionPass());
func_pm.addPass(mlir::TFR::CreateDecomposeTFOpsPass(tfr_module_));
  func_pm.addPass(mlir::TFR::CreateRaiseToTFOpsPass(
      tfr_module_, /*materialize_derived_attrs=*/true));
func_pm.addPass(mlir::CreateFunctionalToExecutorDialectConversionPass());
pm_.addPass(mlir::CreateBreakUpIslandsPass());
}
void TFRDecomposeContext::Destroy() { tfr_module_.erase(); }
absl::StatusOr<FunctionDef> ExpandNode(const NodeDef& node_def,
StringPiece func_name) {
mlir::MLIRContext mlir_ctx;
TF_ASSIGN_OR_RETURN(auto ctx, TFRDecomposeContext::Get(&mlir_ctx));
return ctx->ExpandNode(node_def, func_name);
}
Status DecomposeGraph(mlir::ModuleOp user_module) {
mlir::MLIRContext* mlir_ctx = user_module.getContext();
TF_ASSIGN_OR_RETURN(auto ctx, TFRDecomposeContext::Get(mlir_ctx));
return ctx->DecomposeGraph(user_module);
}
}
} | #include "tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.h"
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/test.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
using testing::ElementsAreArray;
using testing::Test;
using NodeAndType = std::pair<std::string, tensorflow::DataType>;
namespace tensorflow {
namespace {
REGISTER_OP("MyAddN")
.Input("inputs: N * T")
.Output("sum: T")
.Attr("N: int >= 1")
.Attr("T: {numbertype, variant}")
.SetIsCommutative()
.SetIsAggregate()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RiscAddDummy")
.Input("x: T")
.Input("y: T")
.Output("z: T")
.Attr(
"T: {bfloat16, half, float, double, uint8, int8, int16, int32, int64, "
"complex64, complex128, string}")
.SetShapeFn(shape_inference::UnchangedShape);
constexpr char tfr_raw_text[] = R"(
tfr.func @tf__my_add_n(%values: !tfr.tensor_list,
%n: i64 {tfr.name="N"}) -> !tfr.tensor {
%index = arith.constant 0 : index
%cst = arith.constant 1 : i64
%eq = arith.cmpi "eq", %n, %cst : i64
%v1 = tfr.get_element %values[%index] : (!tfr.tensor_list, index) -> !tfr.tensor
%res = scf.if %eq -> !tfr.tensor {
scf.yield %v1 : !tfr.tensor
} else {
%step = arith.index_cast %cst : i64 to index
%end = arith.index_cast %n : i64 to index
%reduce = scf.for %i = %step to %end step %step iter_args(%reduce_iter=%v1) -> !tfr.tensor {
%v = tfr.get_element %values[%i] : (!tfr.tensor_list, index) -> !tfr.tensor
%reduce_next = tfr.call @tf__risc_add_dummy(%reduce_iter, %v) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
scf.yield %reduce_next : !tfr.tensor
}
scf.yield %reduce : !tfr.tensor
}
tfr.return %res : !tfr.tensor
}
tfr.func @tf__my_add_n_(!tfr.tensor_list<N,T>, i64 {tfr.name="N"}) -> !tfr.tensor attributes{N,T}
tfr.func @tf__risc_add_dummy_(!tfr.tensor<T>, !tfr.tensor<T>) -> !tfr.tensor<T> attributes{T}
)";
class TFRDecomposeContextTest : public Test {
protected:
void SetUp() override {
test_ctx_ = tfr::TFRDecomposeContext::GetFromText(tfr_raw_text, &ctx_);
}
void TearDown() override { test_ctx_->Destroy(); }
mlir::MLIRContext ctx_;
std::unique_ptr<tfr::TFRDecomposeContext> test_ctx_;
};
std::vector<NodeAndType> NodesSequenceOf(const FunctionDef& graph) {
std::vector<NodeAndType> nodes;
for (auto& node : graph.node_def()) {
nodes.push_back({node.op(), node.attr().at("T").type()});
}
return nodes;
}
TEST_F(TFRDecomposeContextTest, FLOAT_1_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("input", 0, DT_FLOAT);
NodeDef test_node;
auto status = NodeDefBuilder("float_add", "MyAddN")
.Input(src_list)
.Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"Identity", DT_FLOAT}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
TEST_F(TFRDecomposeContextTest, FLOAT_3_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("in0", 0, DT_FLOAT);
src_list.emplace_back("in1", 0, DT_FLOAT);
src_list.emplace_back("in2", 0, DT_FLOAT);
NodeDef test_node;
auto status = NodeDefBuilder("float_add_3", "MyAddN")
.Input(src_list)
.Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"RiscAddDummy", DT_FLOAT},
{"RiscAddDummy", DT_FLOAT}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
TEST_F(TFRDecomposeContextTest, INT32_3_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("in0", 0, DT_INT32);
src_list.emplace_back("in1", 0, DT_INT32);
src_list.emplace_back("in2", 0, DT_INT32);
NodeDef test_node;
auto status =
NodeDefBuilder("int_add", "MyAddN").Input(src_list).Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"RiscAddDummy", DT_INT32},
{"RiscAddDummy", DT_INT32}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1e1b571f-877a-4ea3-8859-49202fc0709f | cpp | tensorflow/tensorflow | resampler | tensorflow/lite/delegates/gpu/gl/kernels/resampler.cc | tensorflow/lite/delegates/gpu/cl/kernels/resampler_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/resampler.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class Resampler : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::vector<Variable> parameters = {
{"src_height", static_cast<int>(ctx.input_shapes[0][1])},
{"src_width", static_cast<int>(ctx.input_shapes[0][2])},
};
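    // The GLSL below implements bilinear resampling: the second input holds
    // per-pixel (x, y) source coordinates, and each output is a weighted mix
    // of the four neighboring source texels, with out-of-bounds taps reading
    // as zero.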
std::string source = R"(
highp int X = int(gid.x);
highp int Y = int(gid.y);
highp int S = int(gid.z);
highp vec2 f_coords = ($input_data_1[X, Y, 0]$).xy;
highp vec2 f_coords_floor = floor(f_coords);
highp ivec4 st;
st.xy = ivec2(f_coords_floor.x, f_coords_floor.y);
st.zw = st.xy + ivec2(1, 1);
highp vec2 t = f_coords - f_coords_floor;
bool stx_in = st.x >= 0 && st.x < $src_width$;
bool stz_in = st.z >= 0 && st.z < $src_width$;
bool sty_in = st.y >= 0 && st.y < $src_height$;
bool stw_in = st.w >= 0 && st.w < $src_height$;
vec4 src0 = (stx_in && sty_in) ? $input_data_0[st.x, st.y, S]$ : vec4(0.0);
vec4 src1 = (stz_in && sty_in) ? $input_data_0[st.z, st.y, S]$ : vec4(0.0);
vec4 src2 = (stx_in && stw_in) ? $input_data_0[st.x, st.w, S]$ : vec4(0.0);
vec4 src3 = (stz_in && stw_in) ? $input_data_0[st.z, st.w, S]$ : vec4(0.0);
value_0 = mix(mix(src0, src1, t.x), mix(src2, src3, t.x), t.y);
)";
    *generated_code = {
        std::move(parameters),
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/uint3(),
        /*workgroup=*/uint3(),
        std::move(source),
        /*input=*/IOStructure::ONLY_DEFINITIONS,
        /*output=*/IOStructure::AUTO,
    };
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewResamplerNodeShader() {
return std::make_unique<Resampler>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/resampler_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ResamplerIdentity) {
auto status = ResamplerIdentityTest(BHWC(1, 2, 2, 1), &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status = ResamplerIdentityTest(BHWC(1, 3, 5, 3), &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
status = ResamplerIdentityTest(BHWC(1, 6, 1, 7), &exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/resampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/resampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
017b88cb-d4f6-4058-982e-183d7d3cc96e | cpp | google/quiche | simulator | quiche/quic/test_tools/simulator/simulator.cc | quiche/quic/test_tools/simulator/simulator_test.cc | #include "quiche/quic/test_tools/simulator/simulator.h"
#include <utility>
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace simulator {
Simulator::Simulator() : Simulator(nullptr) {}
Simulator::Simulator(QuicRandom* random_generator)
: random_generator_(random_generator),
alarm_factory_(this, "Default Alarm Manager"),
run_for_should_stop_(false),
enable_random_delays_(false) {
run_for_alarm_.reset(
alarm_factory_.CreateAlarm(new RunForDelegate(&run_for_should_stop_)));
}
Simulator::~Simulator() {
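  // Explicitly destroy the RunFor alarm first: its delegate points back into
  // this Simulator.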
run_for_alarm_.reset();
}
Simulator::Clock::Clock() : now_(kStartTime) {}
QuicTime Simulator::Clock::ApproximateNow() const { return now_; }
QuicTime Simulator::Clock::Now() const { return now_; }
QuicWallTime Simulator::Clock::WallNow() const {
return QuicWallTime::FromUNIXMicroseconds(
(now_ - QuicTime::Zero()).ToMicroseconds());
}
void Simulator::AddActor(Actor* actor) {
auto emplace_times_result =
scheduled_times_.insert(std::make_pair(actor, QuicTime::Infinite()));
auto emplace_names_result = actor_names_.insert(actor->name());
QUICHE_DCHECK(emplace_times_result.second);
QUICHE_DCHECK(emplace_names_result.second);
}
void Simulator::RemoveActor(Actor* actor) {
auto scheduled_time_it = scheduled_times_.find(actor);
auto actor_names_it = actor_names_.find(actor->name());
QUICHE_DCHECK(scheduled_time_it != scheduled_times_.end());
QUICHE_DCHECK(actor_names_it != actor_names_.end());
QuicTime scheduled_time = scheduled_time_it->second;
if (scheduled_time != QuicTime::Infinite()) {
Unschedule(actor);
}
scheduled_times_.erase(scheduled_time_it);
actor_names_.erase(actor_names_it);
}
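// Schedule() only ever moves an actor's wakeup earlier; a request for a time
// at or after the currently scheduled one is ignored.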
void Simulator::Schedule(Actor* actor, QuicTime new_time) {
auto scheduled_time_it = scheduled_times_.find(actor);
QUICHE_DCHECK(scheduled_time_it != scheduled_times_.end());
QuicTime scheduled_time = scheduled_time_it->second;
if (scheduled_time <= new_time) {
return;
}
if (scheduled_time != QuicTime::Infinite()) {
Unschedule(actor);
}
scheduled_time_it->second = new_time;
schedule_.insert(std::make_pair(new_time, actor));
}
void Simulator::Unschedule(Actor* actor) {
auto scheduled_time_it = scheduled_times_.find(actor);
QUICHE_DCHECK(scheduled_time_it != scheduled_times_.end());
QuicTime scheduled_time = scheduled_time_it->second;
QUICHE_DCHECK(scheduled_time != QuicTime::Infinite());
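  // schedule_ is a multimap keyed by time, so walk every entry at the actor's
  // scheduled time to find and erase this actor's event.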
auto range = schedule_.equal_range(scheduled_time);
for (auto it = range.first; it != range.second; ++it) {
if (it->second == actor) {
schedule_.erase(it);
scheduled_time_it->second = QuicTime::Infinite();
return;
}
}
QUICHE_DCHECK(false);
}
const QuicClock* Simulator::GetClock() const { return &clock_; }
QuicRandom* Simulator::GetRandomGenerator() {
if (random_generator_ == nullptr) {
random_generator_ = QuicRandom::GetInstance();
}
return random_generator_;
}
quiche::QuicheBufferAllocator* Simulator::GetStreamSendBufferAllocator() {
return &buffer_allocator_;
}
QuicAlarmFactory* Simulator::GetAlarmFactory() { return &alarm_factory_; }
Simulator::RunForDelegate::RunForDelegate(bool* run_for_should_stop)
: run_for_should_stop_(run_for_should_stop) {}
void Simulator::RunForDelegate::OnAlarm() { *run_for_should_stop_ = true; }
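// Runs the simulation for |time_span| of virtual time by arming a one-shot
// alarm whose delegate sets run_for_should_stop_, then running the event loop
// until that flag flips.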
void Simulator::RunFor(QuicTime::Delta time_span) {
QUICHE_DCHECK(!run_for_alarm_->IsSet());
const QuicTime end_time = clock_.Now() + time_span;
run_for_alarm_->Set(end_time);
run_for_should_stop_ = false;
bool simulation_result = RunUntil([this]() { return run_for_should_stop_; });
QUICHE_DCHECK(simulation_result);
QUICHE_DCHECK(clock_.Now() == end_time);
}
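// Pops the earliest pending event, advances the simulated clock to its
// timestamp, and invokes the actor. An event registered in the past is a bug
// and is reported via QUIC_BUG.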
void Simulator::HandleNextScheduledActor() {
const auto current_event_it = schedule_.begin();
QuicTime event_time = current_event_it->first;
Actor* actor = current_event_it->second;
QUIC_DVLOG(3) << "At t = " << event_time.ToDebuggingValue() << ", calling "
<< actor->name();
Unschedule(actor);
if (clock_.Now() > event_time) {
QUIC_BUG(quic_bug_10150_1)
<< "Error: event registered by [" << actor->name()
<< "] requires travelling back in time. Current time: "
<< clock_.Now().ToDebuggingValue()
<< ", scheduled time: " << event_time.ToDebuggingValue();
}
clock_.now_ = event_time;
actor->Act();
}
}
} | #include "quiche/quic/test_tools/simulator/simulator.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/node_hash_map.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simulator/alarm_factory.h"
#include "quiche/quic/test_tools/simulator/link.h"
#include "quiche/quic/test_tools/simulator/packet_filter.h"
#include "quiche/quic/test_tools/simulator/queue.h"
#include "quiche/quic/test_tools/simulator/switch.h"
#include "quiche/quic/test_tools/simulator/traffic_policer.h"
using testing::_;
using testing::Return;
using testing::StrictMock;
namespace quic {
namespace simulator {
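// A trivial actor that increments an integer once per |period|; the tests
// below use it to drive and observe the passage of simulated time.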
class Counter : public Actor {
public:
Counter(Simulator* simulator, std::string name, QuicTime::Delta period)
: Actor(simulator, name), value_(-1), period_(period) {
Schedule(clock_->Now());
}
~Counter() override {}
inline int get_value() const { return value_; }
void Act() override {
++value_;
QUIC_DVLOG(1) << name_ << " has value " << value_ << " at time "
<< clock_->Now().ToDebuggingValue();
Schedule(clock_->Now() + period_);
}
private:
int value_;
QuicTime::Delta period_;
};
class SimulatorTest : public quic::test::QuicTest {};
TEST_F(SimulatorTest, Counters) {
Simulator simulator;
for (int i = 0; i < 2; ++i) {
Counter fast_counter(&simulator, "fast_counter",
QuicTime::Delta::FromSeconds(3));
Counter slow_counter(&simulator, "slow_counter",
QuicTime::Delta::FromSeconds(10));
simulator.RunUntil(
[&slow_counter]() { return slow_counter.get_value() >= 10; });
EXPECT_EQ(10, slow_counter.get_value());
EXPECT_EQ(10 * 10 / 3, fast_counter.get_value());
}
}
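// An unconstrained receive port that tallies total bytes and packets, plus a
// per-destination packet count for later assertions.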
class CounterPort : public UnconstrainedPortInterface {
public:
CounterPort() { Reset(); }
~CounterPort() override {}
inline QuicByteCount bytes() const { return bytes_; }
inline QuicPacketCount packets() const { return packets_; }
void AcceptPacket(std::unique_ptr<Packet> packet) override {
bytes_ += packet->size;
packets_ += 1;
per_destination_packet_counter_[packet->destination] += 1;
}
void Reset() {
bytes_ = 0;
packets_ = 0;
per_destination_packet_counter_.clear();
}
QuicPacketCount CountPacketsForDestination(std::string destination) const {
auto result_it = per_destination_packet_counter_.find(destination);
if (result_it == per_destination_packet_counter_.cend()) {
return 0;
}
return result_it->second;
}
private:
QuicByteCount bytes_;
QuicPacketCount packets_;
absl::node_hash_map<std::string, QuicPacketCount>
per_destination_packet_counter_;
};
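// An endpoint that transmits fixed-size packets to |destination| as fast as
// its egress port permits, recording everything it has sent.
// Pause()/Resume() gate transmission.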
class LinkSaturator : public Endpoint {
public:
LinkSaturator(Simulator* simulator, std::string name,
QuicByteCount packet_size, std::string destination)
: Endpoint(simulator, name),
packet_size_(packet_size),
destination_(std::move(destination)),
bytes_transmitted_(0),
packets_transmitted_(0) {
Schedule(clock_->Now());
}
void Act() override {
if (tx_port_->TimeUntilAvailable().IsZero()) {
auto packet = std::make_unique<Packet>();
packet->source = name_;
packet->destination = destination_;
packet->tx_timestamp = clock_->Now();
packet->size = packet_size_;
tx_port_->AcceptPacket(std::move(packet));
bytes_transmitted_ += packet_size_;
packets_transmitted_ += 1;
}
Schedule(clock_->Now() + tx_port_->TimeUntilAvailable());
}
UnconstrainedPortInterface* GetRxPort() override {
return static_cast<UnconstrainedPortInterface*>(&rx_port_);
}
void SetTxPort(ConstrainedPortInterface* port) override { tx_port_ = port; }
CounterPort* counter() { return &rx_port_; }
inline QuicByteCount bytes_transmitted() const { return bytes_transmitted_; }
inline QuicPacketCount packets_transmitted() const {
return packets_transmitted_;
}
void Pause() { Unschedule(); }
void Resume() { Schedule(clock_->Now()); }
private:
QuicByteCount packet_size_;
std::string destination_;
ConstrainedPortInterface* tx_port_;
CounterPort rx_port_;
QuicByteCount bytes_transmitted_;
QuicPacketCount packets_transmitted_;
};
TEST_F(SimulatorTest, DirectLinkSaturation) {
Simulator simulator;
LinkSaturator saturator_a(&simulator, "Saturator A", 1000, "Saturator B");
LinkSaturator saturator_b(&simulator, "Saturator B", 100, "Saturator A");
SymmetricLink link(&saturator_a, &saturator_b,
QuicBandwidth::FromKBytesPerSecond(1000),
QuicTime::Delta::FromMilliseconds(100) +
QuicTime::Delta::FromMicroseconds(1));
const QuicTime start_time = simulator.GetClock()->Now();
const QuicTime after_first_50_ms =
start_time + QuicTime::Delta::FromMilliseconds(50);
simulator.RunUntil([&simulator, after_first_50_ms]() {
return simulator.GetClock()->Now() >= after_first_50_ms;
});
EXPECT_LE(1000u * 50u, saturator_a.bytes_transmitted());
EXPECT_GE(1000u * 51u, saturator_a.bytes_transmitted());
EXPECT_LE(1000u * 50u, saturator_b.bytes_transmitted());
EXPECT_GE(1000u * 51u, saturator_b.bytes_transmitted());
EXPECT_LE(50u, saturator_a.packets_transmitted());
EXPECT_GE(51u, saturator_a.packets_transmitted());
EXPECT_LE(500u, saturator_b.packets_transmitted());
EXPECT_GE(501u, saturator_b.packets_transmitted());
EXPECT_EQ(0u, saturator_a.counter()->bytes());
EXPECT_EQ(0u, saturator_b.counter()->bytes());
simulator.RunUntil([&saturator_a, &saturator_b]() {
if (saturator_a.counter()->packets() > 1000 ||
saturator_b.counter()->packets() > 100) {
ADD_FAILURE() << "The simulation did not arrive at the expected "
"termination contidition. Saturator A counter: "
<< saturator_a.counter()->packets()
<< ", saturator B counter: "
<< saturator_b.counter()->packets();
return true;
}
return saturator_a.counter()->packets() == 1000 &&
saturator_b.counter()->packets() == 100;
});
EXPECT_EQ(201u, saturator_a.packets_transmitted());
EXPECT_EQ(2001u, saturator_b.packets_transmitted());
EXPECT_EQ(201u * 1000, saturator_a.bytes_transmitted());
EXPECT_EQ(2001u * 100, saturator_b.bytes_transmitted());
EXPECT_EQ(1000u,
saturator_a.counter()->CountPacketsForDestination("Saturator A"));
EXPECT_EQ(100u,
saturator_b.counter()->CountPacketsForDestination("Saturator B"));
EXPECT_EQ(0u,
saturator_a.counter()->CountPacketsForDestination("Saturator B"));
EXPECT_EQ(0u,
saturator_b.counter()->CountPacketsForDestination("Saturator A"));
const QuicTime end_time = simulator.GetClock()->Now();
const QuicBandwidth observed_bandwidth = QuicBandwidth::FromBytesAndTimeDelta(
saturator_a.bytes_transmitted(), end_time - start_time);
EXPECT_APPROX_EQ(link.bandwidth(), observed_bandwidth, 0.01f);
}
class PacketAcceptor : public ConstrainedPortInterface {
public:
void AcceptPacket(std::unique_ptr<Packet> packet) override {
packets_.emplace_back(std::move(packet));
}
QuicTime::Delta TimeUntilAvailable() override {
return QuicTime::Delta::Zero();
}
std::vector<std::unique_ptr<Packet>>* packets() { return &packets_; }
private:
std::vector<std::unique_ptr<Packet>> packets_;
};
TEST_F(SimulatorTest, Queue) {
Simulator simulator;
Queue queue(&simulator, "Queue", 1000);
PacketAcceptor acceptor;
queue.set_tx_port(&acceptor);
EXPECT_EQ(0u, queue.bytes_queued());
EXPECT_EQ(0u, queue.packets_queued());
EXPECT_EQ(0u, acceptor.packets()->size());
auto first_packet = std::make_unique<Packet>();
first_packet->size = 600;
queue.AcceptPacket(std::move(first_packet));
EXPECT_EQ(600u, queue.bytes_queued());
EXPECT_EQ(1u, queue.packets_queued());
EXPECT_EQ(0u, acceptor.packets()->size());
auto second_packet = std::make_unique<Packet>();
second_packet->size = 500;
queue.AcceptPacket(std::move(second_packet));
EXPECT_EQ(600u, queue.bytes_queued());
EXPECT_EQ(1u, queue.packets_queued());
EXPECT_EQ(0u, acceptor.packets()->size());
auto third_packet = std::make_unique<Packet>();
third_packet->size = 400;
queue.AcceptPacket(std::move(third_packet));
EXPECT_EQ(1000u, queue.bytes_queued());
EXPECT_EQ(2u, queue.packets_queued());
EXPECT_EQ(0u, acceptor.packets()->size());
simulator.RunUntil([]() { return false; });
EXPECT_EQ(0u, queue.bytes_queued());
EXPECT_EQ(0u, queue.packets_queued());
ASSERT_EQ(2u, acceptor.packets()->size());
EXPECT_EQ(600u, acceptor.packets()->at(0)->size);
EXPECT_EQ(400u, acceptor.packets()->at(1)->size);
}
TEST_F(SimulatorTest, QueueBottleneck) {
const QuicBandwidth local_bandwidth =
QuicBandwidth::FromKBytesPerSecond(1000);
const QuicBandwidth bottleneck_bandwidth = 0.1f * local_bandwidth;
const QuicTime::Delta local_propagation_delay =
QuicTime::Delta::FromMilliseconds(1);
const QuicTime::Delta bottleneck_propagation_delay =
QuicTime::Delta::FromMilliseconds(20);
const QuicByteCount bdp =
bottleneck_bandwidth *
(local_propagation_delay + bottleneck_propagation_delay);
Simulator simulator;
LinkSaturator saturator(&simulator, "Saturator", 1000, "Counter");
ASSERT_GE(bdp, 1000u);
Queue queue(&simulator, "Queue", bdp);
CounterPort counter;
OneWayLink local_link(&simulator, "Local link", &queue, local_bandwidth,
local_propagation_delay);
OneWayLink bottleneck_link(&simulator, "Bottleneck link", &counter,
bottleneck_bandwidth,
bottleneck_propagation_delay);
saturator.SetTxPort(&local_link);
queue.set_tx_port(&bottleneck_link);
static const QuicPacketCount packets_received = 1000;
simulator.RunUntil(
[&counter]() { return counter.packets() == packets_received; });
const double loss_ratio = 1 - static_cast<double>(packets_received) /
saturator.packets_transmitted();
EXPECT_NEAR(loss_ratio, 0.9, 0.001);
}
TEST_F(SimulatorTest, OnePacketQueue) {
const QuicBandwidth local_bandwidth =
QuicBandwidth::FromKBytesPerSecond(1000);
const QuicBandwidth bottleneck_bandwidth = 0.1f * local_bandwidth;
const QuicTime::Delta local_propagation_delay =
QuicTime::Delta::FromMilliseconds(1);
const QuicTime::Delta bottleneck_propagation_delay =
QuicTime::Delta::FromMilliseconds(20);
Simulator simulator;
LinkSaturator saturator(&simulator, "Saturator", 1000, "Counter");
Queue queue(&simulator, "Queue", 1000);
CounterPort counter;
OneWayLink local_link(&simulator, "Local link", &queue, local_bandwidth,
local_propagation_delay);
OneWayLink bottleneck_link(&simulator, "Bottleneck link", &counter,
bottleneck_bandwidth,
bottleneck_propagation_delay);
saturator.SetTxPort(&local_link);
queue.set_tx_port(&bottleneck_link);
static const QuicPacketCount packets_received = 10;
const QuicTime deadline =
simulator.GetClock()->Now() + QuicTime::Delta::FromSeconds(10);
simulator.RunUntil([&simulator, &counter, deadline]() {
return counter.packets() == packets_received ||
simulator.GetClock()->Now() > deadline;
});
ASSERT_EQ(packets_received, counter.packets());
}
TEST_F(SimulatorTest, SwitchedNetwork) {
const QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(10000);
const QuicTime::Delta base_propagation_delay =
QuicTime::Delta::FromMilliseconds(50);
Simulator simulator;
LinkSaturator saturator1(&simulator, "Saturator 1", 1000, "Saturator 2");
LinkSaturator saturator2(&simulator, "Saturator 2", 1000, "Saturator 3");
LinkSaturator saturator3(&simulator, "Saturator 3", 1000, "Saturator 1");
Switch network_switch(&simulator, "Switch", 8,
bandwidth * base_propagation_delay * 10);
SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
base_propagation_delay);
SymmetricLink link2(&saturator2, network_switch.port(2), bandwidth,
base_propagation_delay * 2);
SymmetricLink link3(&saturator3, network_switch.port(3), bandwidth,
base_propagation_delay * 3);
const QuicTime start_time = simulator.GetClock()->Now();
static const QuicPacketCount bytes_received = 64 * 1000;
simulator.RunUntil([&saturator1]() {
return saturator1.counter()->bytes() >= bytes_received;
});
const QuicTime end_time = simulator.GetClock()->Now();
const QuicBandwidth observed_bandwidth = QuicBandwidth::FromBytesAndTimeDelta(
bytes_received, end_time - start_time);
const double bandwidth_ratio =
static_cast<double>(observed_bandwidth.ToBitsPerSecond()) /
bandwidth.ToBitsPerSecond();
EXPECT_NEAR(1, bandwidth_ratio, 0.1);
const double normalized_received_packets_for_saturator_2 =
static_cast<double>(saturator2.counter()->packets()) /
saturator1.counter()->packets();
const double normalized_received_packets_for_saturator_3 =
static_cast<double>(saturator3.counter()->packets()) /
saturator1.counter()->packets();
EXPECT_NEAR(1, normalized_received_packets_for_saturator_2, 0.1);
EXPECT_NEAR(1, normalized_received_packets_for_saturator_3, 0.1);
EXPECT_EQ(0u,
saturator2.counter()->CountPacketsForDestination("Saturator 1"));
EXPECT_EQ(0u,
saturator3.counter()->CountPacketsForDestination("Saturator 1"));
EXPECT_EQ(1u,
saturator1.counter()->CountPacketsForDestination("Saturator 2"));
EXPECT_EQ(1u,
saturator3.counter()->CountPacketsForDestination("Saturator 2"));
EXPECT_EQ(1u,
saturator1.counter()->CountPacketsForDestination("Saturator 3"));
EXPECT_EQ(1u,
saturator2.counter()->CountPacketsForDestination("Saturator 3"));
}
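// Repeatedly cancels and re-arms |alarm| every |interval| until the alarm's
// original deadline has passed, counting how many times each happened.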
class AlarmToggler : public Actor {
public:
AlarmToggler(Simulator* simulator, std::string name, QuicAlarm* alarm,
QuicTime::Delta interval)
: Actor(simulator, name),
alarm_(alarm),
interval_(interval),
deadline_(alarm->deadline()),
times_set_(0),
times_cancelled_(0) {
EXPECT_TRUE(alarm->IsSet());
EXPECT_GE(alarm->deadline(), clock_->Now());
Schedule(clock_->Now());
}
void Act() override {
if (deadline_ <= clock_->Now()) {
return;
}
if (alarm_->IsSet()) {
alarm_->Cancel();
times_cancelled_++;
} else {
alarm_->Set(deadline_);
times_set_++;
}
Schedule(clock_->Now() + interval_);
}
inline int times_set() { return times_set_; }
inline int times_cancelled() { return times_cancelled_; }
private:
QuicAlarm* alarm_;
QuicTime::Delta interval_;
QuicTime deadline_;
int times_set_;
int times_cancelled_;
};
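// Alarm delegate that increments a counter every time the alarm fires.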
class CounterDelegate : public QuicAlarm::DelegateWithoutContext {
public:
explicit CounterDelegate(size_t* counter) : counter_(counter) {}
void OnAlarm() override { *counter_ += 1; }
private:
size_t* counter_;
};
TEST_F(SimulatorTest, Alarms) {
Simulator simulator;
QuicAlarmFactory* alarm_factory = simulator.GetAlarmFactory();
size_t fast_alarm_counter = 0;
size_t slow_alarm_counter = 0;
std::unique_ptr<QuicAlarm> alarm_fast(
alarm_factory->CreateAlarm(new CounterDelegate(&fast_alarm_counter)));
std::unique_ptr<QuicAlarm> alarm_slow(
alarm_factory->CreateAlarm(new CounterDelegate(&slow_alarm_counter)));
const QuicTime start_time = simulator.GetClock()->Now();
alarm_fast->Set(start_time + QuicTime::Delta::FromMilliseconds(100));
alarm_slow->Set(start_time + QuicTime::Delta::FromMilliseconds(750));
AlarmToggler toggler(&simulator, "Toggler", alarm_slow.get(),
QuicTime::Delta::FromMilliseconds(100));
const QuicTime end_time =
start_time + QuicTime::Delta::FromMilliseconds(1000);
EXPECT_FALSE(simulator.RunUntil([&simulator, end_time]() {
return simulator.GetClock()->Now() >= end_time;
}));
EXPECT_EQ(1u, slow_alarm_counter);
EXPECT_EQ(1u, fast_alarm_counter);
EXPECT_EQ(4, toggler.times_set());
EXPECT_EQ(4, toggler.times_cancelled());
}
TEST_F(SimulatorTest, AlarmCancelling) {
Simulator simulator;
QuicAlarmFactory* alarm_factory = simulator.GetAlarmFactory();
size_t alarm_counter = 0;
std::unique_ptr<QuicAlarm> alarm(
alarm_factory->CreateAlarm(new CounterDelegate(&alarm_counter)));
const QuicTime start_time = simulator.GetClock()->Now();
const QuicTime alarm_at = start_time + QuicTime::Delta::FromMilliseconds(300);
const QuicTime end_time = start_time + QuicTime::Delta::FromMilliseconds(400);
alarm->Set(alarm_at);
alarm->Cancel();
EXPECT_FALSE(alarm->IsSet());
EXPECT_FALSE(simulator.RunUntil([&simulator, end_time]() {
return simulator.GetClock()->Now() >= end_time;
}));
EXPECT_FALSE(alarm->IsSet());
EXPECT_EQ(0u, alarm_counter);
}
TEST_F(SimulatorTest, AlarmInPast) {
Simulator simulator;
QuicAlarmFactory* alarm_factory = simulator.GetAlarmFactory();
size_t alarm_counter = 0;
std::unique_ptr<QuicAlarm> alarm(
alarm_factory->CreateAlarm(new CounterDelegate(&alarm_counter)));
const QuicTime start_time = simulator.GetClock()->Now();
simulator.RunFor(QuicTime::Delta::FromMilliseconds(400));
alarm->Set(start_time);
simulator.RunFor(QuicTime::Delta::FromMilliseconds(1));
EXPECT_FALSE(alarm->IsSet());
EXPECT_EQ(1u, alarm_counter);
}
TEST_F(SimulatorTest, RunUntilOrTimeout) {
Simulator simulator;
bool simulation_result;
Counter counter(&simulator, "counter", QuicTime::Delta::FromSeconds(1));
simulation_result = simulator.RunUntilOrTimeout(
[&counter]() { return counter.get_value() == 10; },
QuicTime::Delta::FromSeconds(20));
ASSERT_TRUE(simulation_result);
simulation_result = simulator.RunUntilOrTimeout(
[&counter]() { return counter.get_value() == 100; },
QuicTime::Delta::FromSeconds(20));
ASSERT_FALSE(simulation_result);
}
TEST_F(SimulatorTest, RunFor) {
Simulator simulator;
Counter counter(&simulator, "counter", QuicTime::Delta::FromSeconds(3));
simulator.RunFor(QuicTime::Delta::FromSeconds(100));
EXPECT_EQ(33, counter.get_value());
}
class MockPacketFilter : public PacketFilter {
public:
MockPacketFilter(Simulator* simulator, std::string name, Endpoint* endpoint)
: PacketFilter(simulator, name, endpoint) {}
MOCK_METHOD(bool, FilterPacket, (const Packet&), (override));
};
TEST_F(SimulatorTest, PacketFilter) {
const QuicBandwidth bandwidth =
QuicBandwidth::FromBytesPerSecond(1024 * 1024);
const QuicTime::Delta base_propagation_delay =
QuicTime::Delta::FromMilliseconds(5);
Simulator simulator;
LinkSaturator saturator_a(&simulator, "Saturator A", 1000, "Saturator B");
LinkSaturator saturator_b(&simulator, "Saturator B", 1000, "Saturator A");
Switch network_switch(&simulator, "Switch", 8,
bandwidth * base_propagation_delay * 10);
StrictMock<MockPacketFilter> a_to_b_filter(&simulator, "A -> B filter",
network_switch.port(1));
StrictMock<MockPacketFilter> b_to_a_filter(&simulator, "B -> A filter",
network_switch.port(2));
SymmetricLink link_a(&a_to_b_filter, &saturator_b, bandwidth,
base_propagation_delay);
SymmetricLink link_b(&b_to_a_filter, &saturator_a, bandwidth,
base_propagation_delay);
EXPECT_CALL(a_to_b_filter, FilterPacket(_)).WillRepeatedly(Return(true));
EXPECT_CALL(b_to_a_filter, FilterPacket(_)).WillRepeatedly(Return(false));
simulator.RunFor(QuicTime::Delta::FromSeconds(10));
EXPECT_GE(saturator_b.counter()->packets(), 1u);
EXPECT_EQ(saturator_a.counter()->packets(), 0u);
}
TEST_F(SimulatorTest, TrafficPolicer) {
const QuicBandwidth bandwidth =
QuicBandwidth::FromBytesPerSecond(1024 * 1024);
const QuicTime::Delta base_propagation_delay =
QuicTime::Delta::FromMilliseconds(5);
const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(10);
Simulator simulator;
LinkSaturator saturator1(&simulator, "Saturator 1", 1000, "Saturator 2");
LinkSaturator saturator2(&simulator, "Saturator 2", 1000, "Saturator 1");
Switch network_switch(&simulator, "Switch", 8,
bandwidth * base_propagation_delay * 10);
static const QuicByteCount initial_burst = 1000 * 10;
static const QuicByteCount max_bucket_size = 1000 * 100;
static const QuicBandwidth target_bandwidth = bandwidth * 0.25;
TrafficPolicer policer(&simulator, "Policer", initial_burst, max_bucket_size,
target_bandwidth, network_switch.port(2));
SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
base_propagation_delay);
SymmetricLink link2(&saturator2, &policer, bandwidth, base_propagation_delay);
bool simulator_result = simulator.RunUntilOrTimeout(
[&saturator1]() {
return saturator1.bytes_transmitted() == initial_burst;
},
timeout);
ASSERT_TRUE(simulator_result);
saturator1.Pause();
simulator_result = simulator.RunUntilOrTimeout(
[&saturator2]() {
return saturator2.counter()->bytes() == initial_burst;
},
timeout);
ASSERT_TRUE(simulator_result);
saturator1.Resume();
const QuicTime::Delta simulation_time = QuicTime::Delta::FromSeconds(10);
simulator.RunFor(simulation_time);
for (auto* saturator : {&saturator1, &saturator2}) {
EXPECT_APPROX_EQ(bandwidth * simulation_time,
saturator->bytes_transmitted(), 0.01f);
}
EXPECT_APPROX_EQ(saturator1.bytes_transmitted() / 4,
saturator2.counter()->bytes(), 0.1f);
EXPECT_APPROX_EQ(saturator2.bytes_transmitted(),
saturator1.counter()->bytes(), 0.1f);
}
TEST_F(SimulatorTest, TrafficPolicerBurst) {
const QuicBandwidth bandwidth =
QuicBandwidth::FromBytesPerSecond(1024 * 1024);
const QuicTime::Delta base_propagation_delay =
QuicTime::Delta::FromMilliseconds(5);
const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(10);
Simulator simulator;
LinkSaturator saturator1(&simulator, "Saturator 1", 1000, "Saturator 2");
LinkSaturator saturator2(&simulator, "Saturator 2", 1000, "Saturator 1");
Switch network_switch(&simulator, "Switch", 8,
bandwidth * base_propagation_delay * 10);
const QuicByteCount initial_burst = 1000 * 10;
const QuicByteCount max_bucket_size = 1000 * 100;
const QuicBandwidth target_bandwidth = bandwidth * 0.25;
TrafficPolicer policer(&simulator, "Policer", initial_burst, max_bucket_size,
target_bandwidth, network_switch.port(2));
SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
base_propagation_delay);
SymmetricLink link2(&saturator2, &policer, bandwidth, base_propagation_delay);
bool simulator_result = simulator.RunUntilOrTimeout(
[&saturator1, &saturator2]() {
return saturator1.packets_transmitted() > 0 &&
saturator2.packets_transmitted() > 0;
},
timeout);
ASSERT_TRUE(simulator_result);
saturator1.Pause();
saturator2.Pause();
simulator.RunFor(1.5f * target_bandwidth.TransferTime(max_bucket_size));
saturator1.Resume();
simulator.RunFor(bandwidth.TransferTime(max_bucket_size));
saturator1.Pause();
simulator.RunFor(2 * base_propagation_delay);
EXPECT_APPROX_EQ(saturator1.bytes_transmitted(),
saturator2.counter()->bytes(), 0.1f);
saturator1.Resume();
simulator.RunFor(QuicTime::Delta::FromSeconds(10));
EXPECT_APPROX_EQ(saturator1.bytes_transmitted() / 4,
saturator2.counter()->bytes(), 0.1f);
}
TEST_F(SimulatorTest, PacketAggregation) {
const QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(1000);
const QuicTime::Delta base_propagation_delay =
QuicTime::Delta::FromMicroseconds(1);
const QuicByteCount aggregation_threshold = 1000;
const QuicTime::Delta aggregation_timeout = QuicTime::Delta::FromSeconds(30);
Simulator simulator;
LinkSaturator saturator1(&simulator, "Saturator 1", 10, "Saturator 2");
LinkSaturator saturator2(&simulator, "Saturator 2", 10, "Saturator 1");
Switch network_switch(&simulator, "Switch", 8, 10 * aggregation_threshold);
SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
base_propagation_delay);
SymmetricLink link2(&saturator2, network_switch.port(2), bandwidth,
2 * base_propagation_delay);
Queue* queue = network_switch.port_queue(2);
queue->EnableAggregation(aggregation_threshold, aggregation_timeout);
network_switch.port_queue(1)->EnableAggregation(5, aggregation_timeout);
simulator.RunFor(0.9 * bandwidth.TransferTime(aggregation_threshold));
EXPECT_EQ(0u, saturator2.counter()->bytes());
saturator1.Pause();
saturator2.Pause();
simulator.RunFor(QuicTime::Delta::FromSeconds(10));
EXPECT_EQ(0u, saturator2.counter()->bytes());
EXPECT_EQ(900u, queue->bytes_queued());
EXPECT_EQ(910u, saturator1.counter()->bytes());
saturator1.Resume();
simulator.RunFor(0.5 * bandwidth.TransferTime(aggregation_threshold));
saturator1.Pause();
simulator.RunFor(QuicTime::Delta::FromSeconds(10));
EXPECT_EQ(1000u, saturator2.counter()->bytes());
EXPECT_EQ(400u, queue->bytes_queued());
simulator.RunFor(aggregation_timeout);
EXPECT_EQ(1400u, saturator2.counter()->bytes());
EXPECT_EQ(0u, queue->bytes_queued());
saturator1.Resume();
simulator.RunFor(5.5 * bandwidth.TransferTime(aggregation_threshold));
saturator1.Pause();
simulator.RunFor(QuicTime::Delta::FromSeconds(10));
EXPECT_EQ(6400u, saturator2.counter()->bytes());
EXPECT_EQ(500u, queue->bytes_queued());
simulator.RunFor(aggregation_timeout);
EXPECT_EQ(6900u, saturator2.counter()->bytes());
EXPECT_EQ(0u, queue->bytes_queued());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/simulator/simulator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/simulator/simulator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
17d19366-2d7f-45aa-9c96-4b5ac5ca6c8d | cpp | tensorflow/tensorflow | hlo_control_flow_flattening | third_party/xla/xla/tools/hlo_control_flow_flattening.cc | third_party/xla/xla/tools/hlo_control_flow_flattening_test.cc | #include "xla/tools/hlo_control_flow_flattening.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
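// Recursively builds a zero-initialized constant (or tuple of constants)
// matching |shape|.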
HloInstruction* CreateConstant(const Shape& shape,
HloComputation* computation) {
if (shape.IsTuple()) {
std::vector<HloInstruction*> tuple_arguments(shape.tuple_shapes_size());
for (int index = 0; index < shape.tuple_shapes_size(); ++index) {
tuple_arguments[index] =
CreateConstant(shape.tuple_shapes(index), computation);
}
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_arguments));
} else {
return computation->AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(shape)));
}
}
void PrintSubexpression(HloInstruction* inst, int depth) {
if (depth == 0) {
return;
}
for (auto* operand : inst->operands()) {
PrintSubexpression(operand, depth - 1);
}
VLOG(2) << inst->ToString();
}
bool IsConstantScalarInt(const HloInstruction* inst) {
return inst->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsEffectiveScalar(inst->shape()) &&
inst->shape().IsInteger();
}
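// Returns true if |while_hlo| is not transitively called from inside another
// while loop. Requires a flattened call graph in which every non-entry
// computation has exactly one caller.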
bool IsNotContainedInLoop(const HloInstruction& while_hlo,
const CallGraph& call_graph) {
const HloComputation* computation = while_hlo.parent();
while (!computation->IsEntryComputation()) {
auto& node = call_graph.GetNode(computation);
CHECK_EQ(node.caller_callsites().size(), 1)
<< "The module is not flattened!";
auto& callsite = node.caller_callsites()[0];
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
return false;
}
computation = callsite.instruction()->parent();
}
return true;
}
}
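// Infers a trip-count bound from the while condition when it compares against
// a scalar integer constant (e.g. "i < 100" or "100 > i"), clamping the
// result to |max_loop_count|; otherwise falls back to |default_loop_count|.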
int GetLoopBound(const HloInstruction& while_hlo, const int default_loop_count,
const int max_loop_count) {
HloInstruction* condition = while_hlo.while_condition()->root_instruction();
if (condition->opcode() == HloOpcode::kCompare) {
int64_t value = 0;
Comparison::Direction cmp = condition->comparison_direction();
if ((cmp == Comparison::Direction::kLt ||
cmp == Comparison::Direction::kLe ||
cmp == Comparison::Direction::kNe) &&
IsConstantScalarInt(condition->operand(1))) {
value = *condition->operand(1)->literal().GetFirstInteger();
} else if ((cmp == Comparison::Direction::kGt ||
cmp == Comparison::Direction::kGe ||
cmp == Comparison::Direction::kNe) &&
IsConstantScalarInt(condition->operand(0))) {
value = *condition->operand(0)->literal().GetFirstInteger();
}
if (value > 0) {
return std::min(value, static_cast<int64_t>(max_loop_count));
}
}
return default_loop_count;
}
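// Like GetLoopBound, but additionally caps loops that are not nested inside
// another while loop at |max_outer_loop_count|.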
int GetLoopBoundWithOuterLoopMax(const HloInstruction& while_hlo,
const CallGraph& call_graph,
const int default_loop_count,
const int max_outer_loop_count,
const int max_loop_count) {
int loop_bound = GetLoopBound(while_hlo, default_loop_count, max_loop_count);
if (loop_bound > max_outer_loop_count) {
if (IsNotContainedInLoop(while_hlo, call_graph)) {
return max_outer_loop_count;
}
}
return loop_bound;
}
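// Rewrites |while_hlo| to run a fixed number of iterations: an S32 induction
// variable is appended to the loop-carried tuple, the condition becomes
// "induction_variable < loop_bound", and the body increments the variable.
// Users that are not get-tuple-elements are redirected to a prefix tuple with
// the original shape.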
absl::Status HloControlFlowFlattening::FlattenWhileLoop(
HloInstruction* while_hlo, const CallGraph& call_graph) const {
CHECK_EQ(while_hlo->opcode(), HloOpcode::kWhile);
HloComputation* computation = while_hlo->parent();
HloInstruction* initialization = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
HloInstruction* old_tuple = while_hlo->mutable_operand(0);
HloInstruction* new_tuple =
TupleUtil::AppendSuffix(old_tuple, {initialization});
int new_tuple_size = new_tuple->shape().tuple_shapes().size();
TF_RETURN_IF_ERROR(while_hlo->ReplaceOperandWithDifferentShape(0, new_tuple));
auto change_op_shape = [&](HloInstruction* instruction) {
Shape* shape = instruction->mutable_shape();
CHECK(shape->IsTuple());
CHECK_EQ(shape->tuple_shapes().size(), new_tuple_size - 1);
Shape* subshape = shape->add_tuple_shapes();
return ShapeUtil::PopulateShape(S32, {}, subshape);
};
auto replace_non_gte_users =
[](HloInstruction* new_tuple) -> absl::StatusOr<HloInstruction*> {
CHECK(new_tuple->shape().IsTuple());
HloInstruction* prefix = nullptr;
std::vector<HloInstruction*> users(new_tuple->users());
for (HloInstruction* user : users) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
if (prefix == nullptr) {
prefix = TupleUtil::ExtractPrefix(
new_tuple, new_tuple->shape().tuple_shapes_size() - 1);
}
TF_RETURN_IF_ERROR(new_tuple->ReplaceUseWithDifferentShape(user, prefix));
}
return prefix;
};
{
HloComputation* condition = while_hlo->while_condition();
TF_RETURN_IF_ERROR(change_op_shape(condition->parameter_instruction(0)));
TF_RETURN_IF_ERROR(
replace_non_gte_users(condition->parameter_instruction(0)).status());
if (VLOG_IS_ON(2)) {
VLOG(2) << "Loop condition in " << while_hlo->parent()->name();
PrintSubexpression(condition->root_instruction(), 3);
}
const int loop_bound = GetLoopBoundWithOuterLoopMax(
*while_hlo, call_graph, while_execution_count_, max_outer_loop_count_,
max_loop_count_);
VLOG(1) << "loop_bound = " << loop_bound;
HloInstruction* limit = condition->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(loop_bound)));
Shape shape = initialization->shape();
HloInstruction* induction_variable =
condition->AddInstruction(HloInstruction::CreateGetTupleElement(
shape, condition->parameter_instruction(0), new_tuple_size - 1));
HloInstruction* compare =
condition->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), induction_variable, limit,
ComparisonDirection::kLt));
TF_RETURN_IF_ERROR(
condition->ReplaceInstruction(condition->root_instruction(), compare));
}
{
HloComputation* body = while_hlo->while_body();
TF_RETURN_IF_ERROR(change_op_shape(body->parameter_instruction(0)));
TF_RETURN_IF_ERROR(
replace_non_gte_users(body->parameter_instruction(0)).status());
HloInstruction* old_root = body->root_instruction();
Shape shape = initialization->shape();
HloInstruction* induction_variable =
body->AddInstruction(HloInstruction::CreateGetTupleElement(
shape, body->parameter_instruction(0), new_tuple_size - 1));
HloInstruction* increment = body->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
induction_variable = body->AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, induction_variable, increment));
HloInstruction* new_root =
TupleUtil::AppendSuffix(old_root, {induction_variable});
body->set_root_instruction(new_root, true);
}
std::vector<HloInstruction*> while_users(while_hlo->users().begin(),
while_hlo->users().end());
TF_RETURN_IF_ERROR(change_op_shape(while_hlo));
TF_ASSIGN_OR_RETURN(HloInstruction * prefix,
replace_non_gte_users(while_hlo));
if (while_hlo->parent()->root_instruction() == while_hlo) {
if (prefix == nullptr) {
prefix = TupleUtil::ExtractPrefix(while_hlo, new_tuple_size - 1);
}
while_hlo->parent()->set_root_instruction(prefix,
true);
}
return absl::OkStatus();
}
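// Replaces an infeed with a nop custom call of the same data shape, re-tupled
// with the infeed's original token operand.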
absl::Status HloControlFlowFlattening::RemoveInfeed(
HloInstruction* infeed_hlo) const {
CHECK_EQ(infeed_hlo->opcode(), HloOpcode::kInfeed);
HloComputation* computation = infeed_hlo->parent();
CHECK_EQ(infeed_hlo->shape().tuple_shapes_size(), 2);
const Shape& infeed_shape = ShapeUtil::GetSubshape(infeed_hlo->shape(), {0});
HloInstruction* custom_call = computation->AddInstruction(
HloInstruction::CreateCustomCall(infeed_shape, {}, kNopCustomCallTarget));
auto new_tuple = HloInstruction::CreateTuple(
{custom_call, infeed_hlo->mutable_operand(0)});
TF_RETURN_IF_ERROR(
computation->ReplaceWithNewInstruction(infeed_hlo, std::move(new_tuple)));
custom_call->SetAndSanitizeName(infeed_hlo->name());
return absl::OkStatus();
}
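// Replaces a recv/recv-done pair with shape-preserving nop custom calls,
// keeping the original instruction names and schedule positions.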
absl::StatusOr<std::pair<HloInstruction*, HloInstruction*>>
HloControlFlowFlattening::RemoveRecvAndRecvDone(
HloInstruction* recv_done,
absl::flat_hash_set<HloInstruction*>* additional_removed) const {
CHECK_EQ(recv_done->opcode(), HloOpcode::kRecvDone);
CHECK_EQ(recv_done->operand_count(), 1);
HloInstruction* recv = recv_done->mutable_operand(0);
CHECK_EQ(recv->opcode(), HloOpcode::kRecv);
HloComputation* computation = recv_done->parent();
CHECK_EQ(recv_done->shape().tuple_shapes_size(), 2);
HloModule* module = computation->parent();
HloInstruction* custom_call_recv =
computation->AddInstruction(HloInstruction::CreateCustomCall(
recv->shape(), recv->operands(), kNopCustomCallTarget));
std::string original_recv_name(recv->name());
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, recv, custom_call_recv);
}
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(recv, custom_call_recv));
custom_call_recv->SetAndSanitizeName(original_recv_name);
std::string original_recv_done_name(recv_done->name());
HloInstruction* custom_call_recv_done = computation->AddInstruction(
HloInstruction::CreateCustomCall(
recv_done->shape(), recv_done->operands(), kNopCustomCallTarget),
recv_done->name());
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, recv_done,
custom_call_recv_done);
}
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(recv_done, custom_call_recv_done));
custom_call_recv_done->SetAndSanitizeName(original_recv_done_name);
return std::make_pair(custom_call_recv, custom_call_recv_done);
}
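// Replaces an outfeed with a side-effecting, manually sharded nop custom call
// that still returns a token.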
absl::Status HloControlFlowFlattening::RemoveOutfeed(
HloInstruction* outfeed_hlo) const {
CHECK_EQ(outfeed_hlo->opcode(), HloOpcode::kOutfeed);
HloComputation* computation = outfeed_hlo->parent();
HloInstruction* custom_call =
computation->AddInstruction(HloInstruction::CreateCustomCall(
outfeed_hlo->shape(), outfeed_hlo->operands(),
kNopReturnTokenCustomCallTarget));
Cast<HloCustomCallInstruction>(custom_call)
->set_custom_call_has_side_effect(true);
custom_call->set_sharding(HloSharding::Manual());
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(outfeed_hlo, custom_call));
custom_call->SetAndSanitizeName(outfeed_hlo->name());
return absl::OkStatus();
}
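// Replaces a send/send-done pair with nop custom calls; the send-done
// replacement is marked side-effecting so it survives dead code elimination.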
absl::StatusOr<std::pair<HloInstruction*, HloInstruction*>>
HloControlFlowFlattening::RemoveSendAndSendDone(
HloInstruction* send_done,
absl::flat_hash_set<HloInstruction*>* additional_removed) const {
CHECK_EQ(send_done->opcode(), HloOpcode::kSendDone);
CHECK_EQ(send_done->operand_count(), 1);
HloInstruction* send = send_done->mutable_operand(0);
CHECK_EQ(send->opcode(), HloOpcode::kSend);
HloComputation* computation = send_done->parent();
HloModule* module = computation->parent();
HloInstruction* custom_call_send =
computation->AddInstruction(HloInstruction::CreateCustomCall(
send->shape(), send->operands(), kNopCustomCallTarget));
std::string original_send_name(send->name());
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, send, custom_call_send);
}
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(send, custom_call_send));
custom_call_send->SetAndSanitizeName(original_send_name);
HloInstruction* custom_call_send_done =
computation->AddInstruction(HloInstruction::CreateCustomCall(
send_done->shape(), send_done->operands(),
kNopReturnTokenCustomCallTarget));
std::string original_send_done_name(send_done->name());
Cast<HloCustomCallInstruction>(custom_call_send_done)
->set_custom_call_has_side_effect(true);
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, send_done,
custom_call_send_done);
}
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(send_done, custom_call_send_done));
custom_call_send_done->SetAndSanitizeName(original_send_done_name);
return std::make_pair(custom_call_send, custom_call_send_done);
}
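// Replaces a collective op with an equivalent-shaped nop custom call,
// preserving its backend config, name, and schedule position.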
absl::StatusOr<HloInstruction*> HloControlFlowFlattening::RemoveCollective(
HloInstruction* hlo) const {
HloComputation* computation = hlo->parent();
HloInstruction* custom_call =
computation->AddInstruction(HloInstruction::CreateCustomCall(
hlo->shape(), hlo->operands(), kNopCustomCallTarget));
custom_call->CopyBackendConfigFrom(hlo);
HloModule* module = computation->parent();
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, hlo, custom_call);
}
std::string original_op_name(hlo->name());
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, custom_call));
custom_call->SetAndSanitizeName(original_op_name);
return custom_call;
}
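// Replaces partition-id/replica-id style instructions with a zero constant of
// the same shape.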
absl::Status HloControlFlowFlattening::RemoveId(HloInstruction* hlo) const {
HloComputation* computation = hlo->parent();
HloInstruction* zero = CreateConstant(hlo->shape(), computation);
std::string original_op_name(hlo->name());
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, zero));
zero->SetAndSanitizeName(original_op_name);
return absl::OkStatus();
}
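// Walks every computation (skipping async ones) and applies whichever
// rewrites the options enable, then runs DCE and refreshes the schedule if
// anything changed.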
absl::StatusOr<bool> HloControlFlowFlattening::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto call_graph = CallGraph::Build(module);
bool changed = false;
absl::flat_hash_set<HloInstruction*> removed;
for (HloComputation* computation : module->computations(execution_threads)) {
if (computation->IsAsyncComputation()) {
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (removed.contains(instruction)) {
continue;
}
if (flatten_while_loop_ && instruction->opcode() == HloOpcode::kWhile) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(FlattenWhileLoop(instruction, *call_graph));
changed = true;
} else if (remove_infeed_outfeed_ &&
instruction->opcode() == HloOpcode::kInfeed) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveInfeed(instruction));
changed = true;
} else if (remove_infeed_outfeed_ &&
instruction->opcode() == HloOpcode::kOutfeed) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveOutfeed(instruction));
changed = true;
} else if (instruction->opcode() == HloOpcode::kSendDone) {
auto send_done_instruction =
DynCast<HloSendDoneInstruction>(instruction);
CHECK(send_done_instruction);
if (remove_comm_ || (remove_host_transfer_ &&
send_done_instruction->is_host_transfer())) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(
RemoveSendAndSendDone(instruction, &removed).status());
changed = true;
}
} else if (instruction->opcode() == HloOpcode::kRecvDone) {
auto recv_done_instruction =
DynCast<HloRecvDoneInstruction>(instruction);
CHECK(recv_done_instruction);
if (remove_comm_ || (remove_host_transfer_ &&
recv_done_instruction->is_host_transfer())) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(
RemoveRecvAndRecvDone(instruction, &removed).status());
changed = true;
}
} else if (remove_comm_ && IsCollective(instruction) &&
!instruction->parent()->IsFusionComputation() &&
(instruction->opcode() != HloOpcode::kAsyncStart &&
instruction->opcode() != HloOpcode::kAsyncUpdate)) {
if (instruction->opcode() == HloOpcode::kAsyncDone) {
while (instruction->opcode() == HloOpcode::kAsyncDone ||
instruction->opcode() == HloOpcode::kAsyncUpdate ||
instruction->opcode() == HloOpcode::kAsyncStart) {
HloInstruction* operand = instruction->mutable_operand(0);
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveCollective(instruction).status());
instruction = operand;
}
} else {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveCollective(instruction).status());
}
changed = true;
} else if ((remove_comm_ || remove_id_) &&
(instruction->opcode() == HloOpcode::kPartitionId ||
instruction->opcode() == HloOpcode::kReplicaId ||
(instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() == "SliceId"))) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveId(instruction));
changed = true;
}
}
}
HloDCE hlo_dce;
TF_ASSIGN_OR_RETURN(bool dce_changed, hlo_dce.Run(module, execution_threads));
changed |= dce_changed;
if (changed && module->has_schedule()) {
TF_RETURN_IF_ERROR(module->schedule().Update());
}
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
} | #include "xla/tools/hlo_control_flow_flattening.h"
#include <memory>
#include <utility>
#include "absl/strings/str_replace.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/despecializer.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/spmd/spmd_partitioner.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class HloControlFlowFlatteningTest : public HloTestBase {
public:
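  // Runs the SPMD partitioner over |hlo_module| so tests can verify that the
  // rewritten ops (e.g. the outfeed replacement) remain partitionable.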
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
std::unique_ptr<VerifiedHloModule> hlo_module, int64_t num_devices = 2) {
spmd::SpmdPartitionerOptions options;
auto collective_ops_creator =
spmd::GetDefaultCollectiveOpsCreator(num_devices, 1);
collective_ops_creator.create_cross_partition_all_gather = nullptr;
HloModuleConfig config = GetModuleConfigForTest();
config.set_use_spmd_partitioning(true);
config.set_num_partitions(num_devices);
HloPassPipeline pass("spmd-partitioning");
pass.AddPass<HloVerifier>(false,
false);
pass.AddPass<spmd::SpmdPartitioner>(num_devices, 1,
options, collective_ops_creator);
pass.AddPass<HloVerifier>(false,
false);
TF_RETURN_IF_ERROR(pass.Run(hlo_module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(hlo_module));
}
};
constexpr int kDefaultMaxLoopCount = 1000;
TEST_F(HloControlFlowFlatteningTest, WhileRoot) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto root = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1)));
EXPECT_THAT(while_op,
op::While(op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Constant())));
auto condition = while_op->while_condition();
EXPECT_THAT(
condition->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0), 2), op::Constant()));
auto body = while_op->while_body();
EXPECT_THAT(body->root_instruction(),
op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Add(op::GetTupleElement(op::Parameter(0), 2),
op::Constant())));
}
TEST_F(HloControlFlowFlatteningTest, WhileConditionCallComputation) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition.called {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] custom-call(), custom_call_target="AllocateBuffer", custom_call_has_side_effect=true
less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
ROOT tuple.2 = (pred[]) tuple(less-than)
}
While.condition {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
call = (pred[]) call(loop_var.3), to_apply=While.condition.called
ROOT get-tuple-element.4 = pred[] get-tuple-element(call), index=0
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
XLA_VLOG_LINES(3, "Loaded HLO module: " + module->ToString());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto root = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1)));
EXPECT_THAT(while_op,
op::While(op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Constant())));
auto condition = while_op->while_condition();
EXPECT_THAT(
condition->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0), 2), op::Constant()));
auto body = while_op->while_body();
EXPECT_THAT(body->root_instruction(),
op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Add(op::GetTupleElement(op::Parameter(0), 2),
op::Constant())));
}
TEST_F(HloControlFlowFlatteningTest, WhileRootScheduled) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto root = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1)));
EXPECT_THAT(while_op,
op::While(op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Constant())));
auto condition = while_op->while_condition();
EXPECT_THAT(
condition->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0), 2), op::Constant()));
}
TEST_F(HloControlFlowFlatteningTest, WhileUser) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
FusedComputation {
param = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(param), index=0
get-tuple-element.5 = s32[3]{0} get-tuple-element(param), index=1
broadcast = s32[3]{0} broadcast(get-tuple-element.4), dimensions={}
ROOT add = s32[3]{0} add(broadcast, get-tuple-element.5)
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
ROOT fusion = s32[3]{0} fusion(while), kind=kLoop, calls=FusedComputation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto fusion = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(fusion, op::Fusion(op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1))));
}
TEST_F(HloControlFlowFlatteningTest, Infeed) {
absl::string_view hlo_string = R"(
HloModule Infeed
ENTRY Infeed {
after-all = token[] after-all()
ROOT infeed.23 = ((bf16[3]{0}, s32[12,5]{0,1}), token[]) infeed(after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto custom_call =
module->entry_computation()->GetInstructionWithName("infeed.23");
EXPECT_THAT(custom_call, op::CustomCall());
auto tuple = module->entry_computation()->root_instruction();
EXPECT_THAT(tuple, op::Tuple(custom_call, op::AfterAll()));
}
TEST_F(HloControlFlowFlatteningTest, InfeedPreserveLayout) {
absl::string_view hlo_string = R"(
HloModule Infeed
ENTRY Infeed {
after-all = token[] after-all()
ROOT infeed = ((bf16[3]{0}, s32[12,5]{0,1:T(8,128)}), token[]) infeed(after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Shape root_shape = module->entry_computation()->root_instruction()->shape();
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto tuple = module->entry_computation()->root_instruction();
EXPECT_THAT(tuple, op::Tuple(op::CustomCall(), op::AfterAll()));
EXPECT_EQ(tuple->shape(), root_shape);
}
TEST_F(HloControlFlowFlatteningTest, OutfeedCustomCallIsPartitionable) {
absl::string_view hlo_string = R"(
HloModule Outfeed
ENTRY Outfeed {
param = (bf16[3]{0}, s32[12,5]{0,1}) parameter(0)
after-all = token[] after-all()
ROOT outfeed.23 = token[] outfeed(param, after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
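  // Field order below is inferred from the pass's option members: presumably
  // while_execution_count=3, max_outer_loop_count=3, max_loop_count=3,
  // remove_infeed_outfeed=true.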
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
3, 3,
3, true});
EXPECT_TRUE(flattening.Run(module.get()).value());
auto custom_call = module->entry_computation()->root_instruction();
EXPECT_EQ(custom_call->name(), "outfeed.23");
EXPECT_TRUE(custom_call->has_sharding());
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
PartitionComputation(std::move(module)));
}
TEST_F(HloControlFlowFlatteningTest, Outfeed) {
absl::string_view hlo_string = R"(
HloModule Outfeed
ENTRY Outfeed {
param = (bf16[3]{0}, s32[12,5]{0,1}) parameter(0)
after-all = token[] after-all()
ROOT outfeed.23 = token[] outfeed(param, after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto custom_call = module->entry_computation()->root_instruction();
EXPECT_EQ(custom_call->name(), "outfeed.23");
EXPECT_THAT(custom_call, op::CustomCall(op::Parameter(0), op::AfterAll()));
}
TEST_F(HloControlFlowFlatteningTest, AllReduce) {
absl::string_view hlo_string = R"(
HloModule AllReduce
sum {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY AllReduce {
param0 = f32[3]{0} parameter(0)
param1 = f32[12,5]{0,1} parameter(1)
ROOT all-reduce = (bf16[3]{0}, bf16[12,5]{0,1}) all-reduce(param0, param1), to_apply=sum, replica_groups={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0), op::Parameter(1)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"all-reduce");
}
TEST_F(HloControlFlowFlatteningTest, AllReduceStartAndDone) {
absl::string_view hlo_string = R"(
HloModule CRS
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
crs = f32[8]{0} all-reduce-start(input), replica_groups={}, to_apply=add
ROOT done = f32[8]{0} all-reduce-done(crs)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"crs");
}
TEST_F(HloControlFlowFlatteningTest, AllGather) {
absl::string_view hlo_string = R"(
HloModule AllGather
ENTRY AllGather {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,128]{0,1} all-gather(input), replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "ag");
}
TEST_F(HloControlFlowFlatteningTest, AllToAll) {
absl::string_view hlo_string = R"(
HloModule AllToAll
ENTRY AllToAll {
input = f32[128,32]{0,1} parameter(0)
ROOT a2a = (f32[128,32]{0,1}) all-to-all(input), replica_groups={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "a2a");
}
TEST_F(HloControlFlowFlatteningTest, CollectivePermute) {
absl::string_view hlo_string = R"(
HloModule CollectivePermute
ENTRY CollectivePermute {
input = f32[128,32]{0,1} parameter(0)
ROOT collective-permute = f32[128,32]{0,1} collective-permute(input), source_target_pairs={{0,1},{1,2},{2,3}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"collective-permute");
}
TEST_F(HloControlFlowFlatteningTest, ReplicaIdSucceedsWithChange) {
absl::string_view hlo_string = R"(
HloModule ReplicaId
ENTRY ReplicaId {
ROOT replica-id.18600 = u32[]{:T(128)} replica-id()
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(), op::Constant());
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"replica-id.18600");
}
TEST_F(HloControlFlowFlatteningTest, RemoveReplicaIdButKeepAllReduce) {
absl::string_view kHloText = R"(
HloModule RemoveReplicaIdButKeepCollective
%sum (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] a, f32[] b)
}
ENTRY ReplicaId {
replica-id.1 = u32[]{:T(128)} replica-id()
ROOT all-reduce.1 = u32[]{:T(128)} all-reduce(replica-id.1), to_apply=sum, replica_groups={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
/*while_execution_count=*/1, /*max_outer_loop_count=*/1,
/*max_loop_count=*/1, /*remove_infeed_outfeed=*/false,
/*flatten_while_loop=*/false, /*remove_comm=*/false,
/*remove_host_transfer=*/false, /*remove_id=*/true});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(), op::AllReduce());
EXPECT_THAT(module->entry_computation()->root_instruction()->operand(0),
op::Constant());
}
TEST_F(HloControlFlowFlatteningTest, CollectivePermuteInPlaceUpdate) {
absl::string_view hlo_string = R"(
HloModule CollectivePermuteInPlaceUpdate
ENTRY CollectivePermuteInPlaceUpdate {
input = f32[128,32]{0,1} parameter(0)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.1, constant.2)
ROOT collective-permute = f32[128,128]{0,1} collective-permute(input, output, tuple.1, tuple.2), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{128,32}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0), op::Broadcast(op::Constant()),
op::Tuple(op::Constant(), op::Constant()),
op::Tuple(op::Constant(), op::Constant())));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"collective-permute");
}
TEST_F(HloControlFlowFlatteningTest, CollectivePermuteStartAndDone) {
absl::string_view hlo_string = R"(
HloModule CollectivePermuteStartAndDone
ENTRY CollectivePermuteStartAndDone {
input = f32[128,32]{0,1} parameter(0)
collective-permute-start.1 = (f32[128,32]{0,1}, f32[128,32]{0,1}, u32[], u32[]) collective-permute-start(input), source_target_pairs={{0,1},{1,2},{2,3}}
ROOT collective-permute-done.1 = f32[128,32]{0,1} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"collective-permute-done.1");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"collective-permute-start.1");
}
TEST_F(HloControlFlowFlatteningTest, Recv) {
absl::string_view hlo_string = R"(
HloModule Recv
ENTRY %Recv () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"recv-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"recv");
}
TEST_F(HloControlFlowFlatteningTest, RecvHostTransfer) {
absl::string_view hlo_string = R"(
HloModule Recv
ENTRY %Recv () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, is_host_transfer=true
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, is_host_transfer=true
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
/*while_execution_count=*/3, /*max_outer_loop_count=*/3,
/*max_loop_count=*/3, /*remove_infeed_outfeed=*/true,
/*flatten_while_loop=*/true, /*remove_comm=*/false,
/*remove_host_transfer=*/true});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"recv-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"recv");
}
TEST_F(HloControlFlowFlatteningTest, Send) {
absl::string_view hlo_string = R"(
HloModule Send
ENTRY %Send () -> token[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%recv}
ROOT %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"send-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"send");
}
TEST_F(HloControlFlowFlatteningTest, SendHostTransfer) {
absl::string_view hlo_string = R"(
HloModule Send
ENTRY %Send () -> token[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, is_host_transfer=true, control-predecessors={%recv}
ROOT %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, is_host_transfer=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
/*while_execution_count=*/3, /*max_outer_loop_count=*/3,
/*max_loop_count=*/3, /*remove_infeed_outfeed=*/true,
/*flatten_while_loop=*/true, /*remove_comm=*/false,
/*remove_host_transfer=*/true});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"send-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"send");
}
TEST_F(HloControlFlowFlatteningTest, AllGatherStartAndDone) {
absl::string_view hlo_string = R"(
HloModule AllGatherStartAndDone
ENTRY AllGatherStartAndDone {
%input = f32[8,256,256] parameter(0)
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(
f32[8,256,256] %input), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
ROOT %ag-done = f32[16,256,256] all-gather-done(
(f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{/*while_execution_count=*/3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "ag-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"ag-start");
}
TEST_F(HloControlFlowFlatteningTest, CollectiveFusion) {
absl::string_view hlo_template = R"(
HloModule collective-fusion, is_scheduled=true
%sum (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] a, f32[] b)
}
%all-gather {
%constant.3 = f32[] constant(0)
%broadcast = f32[full_size,8,128]{2,1,0} broadcast(%constant.3), dimensions={}
%input.0 = f32[4,8,128]{2,1,0} parameter(0)
%input.1 = f32[4,8,128]{2,1,0} parameter(1)
%replica-id.1 = u32[] replica-id()
%constant.4 = u32[] constant(4)
%multiply.1 = u32[] multiply(%replica-id.1, %constant.4)
%constant.5 = u32[] constant(0)
%constant.6 = u32[] constant(0)
%dynamic-update-slice = f32[full_size,8,128]{2,1,0} dynamic-update-slice(%broadcast, %input.0, %multiply.1, %constant.5, %constant.6)
%dynamic-update-slice.1 = f32[full_size,8,128]{2,1,0} dynamic-update-slice(%broadcast, %input.1, %multiply.1, %constant.5, %constant.6)
%all-reduce = (f32[full_size,8,128]{2,1,0}, f32[full_size,8,128]{2,1,0}) all-reduce(%dynamic-update-slice, %dynamic-update-slice.1), replica_groups={}, backend_config="{barrier_config:{barrier_type:3,id:0}}", to_apply=%sum
%gte0 = f32[full_size,8,128]{2,1,0} get-tuple-element(%all-reduce), index=0
%slice = f32[unpadded_size,8,128]{2,1,0} slice(%gte0), slice={[0:unpadded_size], [0:8], [0:128]}
%bitcast = f32[unpadded_size,1,8,128]{3,2,1,0} bitcast(%slice)
%gte1 = f32[full_size,8,128]{2,1,0} get-tuple-element(%all-reduce), index=1
ROOT %tuple = (f32[unpadded_size,1,8,128]{3,2,1,0}, f32[full_size,8,128]{2,1,0}) tuple(%bitcast, %gte1)
}
ENTRY main {
%add.1 = f32[4,8,128]{2,1,0} parameter(0)
%add.2 = f32[4,8,128]{2,1,0} parameter(1)
ROOT %fusion = (f32[unpadded_size,1,8,128]{3,2,1,0}, f32[full_size,8,128]{2,1,0}) fusion(%add.1, %add.2), kind=kCustom, calls=%all-gather
}
)";
auto hlo_string = absl::StrReplaceAll(
hlo_template, {{"full_size", absl::StrCat(12288)},
{"unpadded_size", absl::StrCat(12285)}});
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_TRUE(IsCollective(module->entry_computation()->root_instruction()));
HloControlFlowFlattening flattening({});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0), op::Parameter(1)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "fusion");
}
TEST_F(HloControlFlowFlatteningTest, AsyncAllToAll) {
absl::string_view hlo = R"(
ENTRY main {
param = f32[4,8,128]{2,1,0} parameter(0)
all-to-all-start = ((f32[4,8,128]{2,1,0}), f32[4,8,128]{2,1,0}, u32[], u32[]) all-to-all-start(param), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={1}
ROOT all-to-all-done = f32[4,8,128]{2,1,0} all-to-all-done(all-to-all-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
EXPECT_TRUE(IsCollective(module->entry_computation()->root_instruction()));
HloControlFlowFlattening flattening({});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
}
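// Verifies that the while instruction's condition compares against a
// constant equal to expected_bound.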
void CheckWhileBound(HloInstruction* while_op, int expected_bound) {
auto* cond = while_op->while_condition();
ASSERT_NE(cond, nullptr);
auto* hlo_bound = cond->root_instruction()->operand(1);
EXPECT_TRUE(hlo_bound->IsConstant());
if (hlo_bound->IsConstant()) {
EXPECT_TRUE(hlo_bound->literal().IsAll(expected_bound));
}
}
TEST_F(HloControlFlowFlatteningTest, MaxOuterLoopCount) {
absl::string_view hlo_string = R"(
HloModule NestedWhileComp
InnerBody {
constant.8 = pred[] constant(false)
parameter.5 = (s32[], s32[]) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(parameter.5), index=0
constant.9 = s32[] constant(1)
add.10 = s32[] add(get-tuple-element.6, constant.9)
get-tuple-element.7 = s32[] get-tuple-element(parameter.5), index=1
constant.11 = s32[] constant(1)
add.12 = s32[] add(get-tuple-element.7, constant.11)
ROOT tuple.13 = (s32[], s32[]) tuple(add.10, add.12)
}
InnerCond {
parameter.15 = (s32[], s32[]) parameter(0)
get-tuple-element.17 = s32[] get-tuple-element(parameter.15), index=1
constant.18 = pred[] constant(false)
get-tuple-element.16 = s32[] get-tuple-element(parameter.15), index=0
inner_bound = s32[] constant(100)
ROOT compare.20 = pred[] compare(get-tuple-element.16, inner_bound), direction=LT
}
OuterBody {
constant.24 = pred[] constant(false)
constant.25 = s32[] constant(0)
parameter.22 = (s32[]) parameter(0)
get-tuple-element.23 = s32[] get-tuple-element(parameter.22), index=0
tuple.26 = (s32[], s32[]) tuple(constant.25, get-tuple-element.23)
inner_while = (s32[], s32[]) while(tuple.26), condition=InnerCond, body=InnerBody
get-tuple-element.28 = s32[] get-tuple-element(inner_while), index=0
get-tuple-element.29 = s32[] get-tuple-element(inner_while), index=1
tuple.30 = (s32[], s32[]) tuple(get-tuple-element.28, get-tuple-element.29)
get-tuple-element.31 = s32[] get-tuple-element(tuple.30), index=0
get-tuple-element.32 = s32[] get-tuple-element(tuple.30), index=1
ROOT tuple.33 = (s32[]) tuple(get-tuple-element.32)
}
OuterCond {
constant.37 = pred[] constant(false)
parameter.35 = (s32[]) parameter(0)
get-tuple-element.36 = s32[] get-tuple-element(parameter.35), index=0
outer_bound = s32[] constant(1000)
ROOT compare.39 = pred[] compare(get-tuple-element.36, outer_bound), direction=LT
}
ENTRY NestedWhileComp {
constant.1 = pred[] constant(false)
constant.2 = s32[] constant(0)
tuple.3 = (s32[]) tuple(constant.2)
outer_while = (s32[]) while(tuple.3), condition=OuterCond, body=OuterBody
get-tuple-element.41 = s32[] get-tuple-element(outer_while), index=0
tuple.42 = (s32[]) tuple(get-tuple-element.41)
get-tuple-element.43 = s32[] get-tuple-element(tuple.42), index=0
ROOT tuple.44 = (s32[]) tuple(get-tuple-element.43)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
constexpr int kWhileExecutionCount = 5;
constexpr int kExistingInnerLoopCount = 100;
constexpr int kMaxLoopCount = 10;
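// Only the outer loop should be clamped to kMaxLoopCount; the nested inner
// loop is expected to keep its original bound.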
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
/*while_execution_count=*/kWhileExecutionCount,
/*max_outer_loop_count=*/kMaxLoopCount});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true)
.Run(module.get())
.status());
LOG(INFO) << module->ToString();
auto* outer_while =
module->entry_computation()->GetInstructionWithName("outer_while");
ASSERT_NE(outer_while, nullptr);
CheckWhileBound(outer_while, kMaxLoopCount);
auto* while_body = outer_while->while_body();
ASSERT_NE(while_body, nullptr);
auto* inner_while = while_body->GetInstructionWithName("inner_while");
ASSERT_NE(inner_while, nullptr);
CheckWhileBound(inner_while, kExistingInnerLoopCount);
}
TEST_F(HloControlFlowFlatteningTest, MatchLtUseInferedLoopCount) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(GetLoopBound(*module->entry_computation()->root_instruction(), 123,
kDefaultMaxLoopCount),
100);
}
TEST_F(HloControlFlowFlatteningTest, MatchGtUseInferedLoopCount) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(50)
ROOT greater-than = pred[] compare(constant.2, get-tuple-element.3), direction=GT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(GetLoopBound(*module->entry_computation()->root_instruction(), 123,
kDefaultMaxLoopCount),
50);
}
TEST_F(HloControlFlowFlatteningTest, NotMatchEqUseDefaultLoopCount) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT equal = pred[] compare(get-tuple-element.3, constant.2), direction=EQ
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(GetLoopBound(*module->entry_computation()->root_instruction(), 123,
kDefaultMaxLoopCount),
123);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_control_flow_flattening.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_control_flow_flattening_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a31fc2ca-d988-44e3-8fc8-687ff14810e6 | cpp | tensorflow/tensorflow | transpose_conv | tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc | tensorflow/lite/delegates/xnnpack/transpose_conv_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class ConvolutionTransposedBuffers : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"Convolution Transposed does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const ConvolutionTransposedAttributes&>(ctx.op_attr);
auto weights = attr.weights.shape;
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"kernel_size", int2(weights.w, weights.h)},
{"stride", int2(attr.stride.w, attr.stride.h)},
{"padding", int2(weights.w - 1 - attr.padding.prepended.w,
weights.h - 1 - attr.padding.prepended.h)},
};
std::vector<std::pair<std::string, Object>> objects = {
{"weights",
MakeReadonlyObject(Get3DSizeForPHWO4I4(attr.weights.shape),
ConvertToPHWO4I4Transposed(attr.weights))}};
std::string source = R"(
#define IN_BOUNDS(p, p0, p1) (all(greaterThanEqual(p, p0)) && all(lessThan(p, p1)))
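// p0 is the first kernel tap that lands exactly on an input pixel
// (stride-aligned) for this output location.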
ivec2 p0 = ($padding$ + $stride$ - gid.xy % $stride$) % $stride$;
for (int y = p0.y; y < $kernel_size.y$; y += $stride.y$) {
for (int x = p0.x; x < $kernel_size.x$; x += $stride.x$) {
int i = int(float(y * $kernel_size.x$) + float(x));
ivec2 idx = ivec2(vec2(gid.xy + ivec2(x, y)) - vec2($padding$));
if (IN_BOUNDS(idx, ivec2(0), ivec2($input_data_0_w$, $input_data_0_h$) * $stride$)) {
ivec2 coord = idx / $stride$;
for (int l = 0; l < $src_depth$; ++l) {
vec4 src_color = $input_data_0[coord.x, coord.y, l]$;
value_0.x += dot(src_color, $weights[l * 4 + 0, i, gid.z]$);
value_0.y += dot(src_color, $weights[l * 4 + 1, i, gid.z]$);
value_0.z += dot(src_color, $weights[l * 4 + 2, i, gid.z]$);
value_0.w += dot(src_color, $weights[l * 4 + 3, i, gid.z]$);
}
}
}
}
)";
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
/*parameters=*/std::move(parameters),
/*objects=*/std::move(objects),
/*shared_variables=*/{},
/*workload=*/uint3(),
/*workgroup=*/uint3(),
/*source_code=*/source,
/*input=*/IOStructure::ONLY_DEFINITIONS,
/*output=*/IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewConvolutionTransposedNodeShader() {
return std::make_unique<ConvolutionTransposedBuffers>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/transpose_conv_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(TransposeConvTest, 2x2Stride2) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(2)
.KernelWidth(2)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 2x2Stride2NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(2)
.KernelWidth(2)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 3x3Stride2) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(3)
.KernelWidth(3)
.StrideHeight(2)
.StrideWidth(2)
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 3x3Stride2NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(3)
.KernelWidth(3)
.StrideHeight(2)
.StrideWidth(2)
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride2) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride2NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(2)
.StrideWidth(2)
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(4)
.StrideWidth(4)
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, 4x4Stride4NoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(4)
.KernelWidth(4)
.StrideHeight(4)
.StrideWidth(4)
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithSamePadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithSamePaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithValidPadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SmallKernelWithValidPaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithSamePadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithSamePaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithValidPadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, StrideWithValidPaddingNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ValidPadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, FP16WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.FP16Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, TensorWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.TensorWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, TensorWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.TensorWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, ChannelWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.ChannelWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, ChannelWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.ChannelWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseWeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseFP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseFP16WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.FP16Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseTensorWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.TensorWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseTensorWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.TensorWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseChannelWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.ChannelWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, SparseChannelWiseQuantizedInt8WeightsNoBias) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.SparseWeights()
.ChannelWiseQuantizedInt8Weights()
.NoBias()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
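// Use a two-thread pool inside the delegate to exercise multi-threaded execution.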
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(TransposeConvTest, MultiThreadingNoBias) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.NoBias()
.Test(xnnpack_delegate.get());
}
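// Passes a weights cache to the delegate through its options; the tester is
// given the same cache so packed weights can be looked up from it.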
TEST(TransposeConvTest, WeightsCache) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
std::unique_ptr<TfLiteXNNPackDelegateWeightsCache,
decltype(&TfLiteXNNPackDelegateWeightsCacheDelete)>
weights_cache(TfLiteXNNPackDelegateWeightsCacheCreate(),
TfLiteXNNPackDelegateWeightsCacheDelete);
delegate_options.weights_cache = weights_cache.get();
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto output_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
TransposeConvTester()
.BatchSize(batch_rng())
.OutputHeight(output_rng())
.OutputWidth(output_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.WeightsCache(weights_cache.get())
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/transpose_conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09730dec-80af-493b-9079-4dd8922a3bdf | cpp | tensorflow/tensorflow | slice_sinker | third_party/xla/xla/service/slice_sinker.cc | third_party/xla/xla/service/slice_sinker_test.cc | #include "xla/service/slice_sinker.h"
#include <algorithm>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/shape_util.h"
namespace xla {
namespace {
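// Returns true if both slices use identical start/limit/stride configurations.
// Callers must ensure the two slice sources have the same dimensions.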
bool SameSliceConfiguration(const HloInstruction* slice_1,
const HloInstruction* slice_2) {
CHECK_EQ(slice_1->opcode(), HloOpcode::kSlice);
CHECK_EQ(slice_2->opcode(), HloOpcode::kSlice);
CHECK(slice_1->operand(0)->shape().dimensions() ==
slice_2->operand(0)->shape().dimensions());
return slice_1->slice_starts() == slice_2->slice_starts() &&
slice_1->slice_limits() == slice_2->slice_limits() &&
slice_1->slice_strides() == slice_2->slice_strides();
}
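// Returns true if every operand of the elementwise instruction `inst` is a
// slice, all slice sources have compatible shapes (ignoring element type), and
// every slice uses the same configuration as the first operand.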
bool IsElementwiseOperationOnSimilarSlices(const HloInstruction* inst) {
CHECK(inst->IsElementwise());
if (absl::c_any_of(inst->operands(), [](const HloInstruction* operand) {
return operand->opcode() != HloOpcode::kSlice;
})) {
return false;
}
const HloInstruction* slice0 = inst->operand(0);
return absl::c_all_of(absl::MakeSpan(inst->operands()).subspan(1),
[slice0](const HloInstruction* slice) {
return ShapeUtil::CompatibleIgnoringElementType(
slice0->operand(0)->shape(),
slice->operand(0)->shape()) &&
SameSliceConfiguration(slice0, slice);
});
}
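// Returns true if `candidate` is a live instruction that performs the same
// operation as `operation_on_slices`, with the same result element type, and
// each of its operands slices the corresponding slice source with an identical
// slice configuration.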
bool IsSimilarOperationOnSlices(const HloInstruction* operation_on_slices,
const HloInstruction* candidate) {
if (candidate->user_count() == 0) {
return false;
}
if (!candidate->SameOp(*operation_on_slices) ||
operation_on_slices->shape().element_type() !=
candidate->shape().element_type()) {
return false;
}
const HloInstruction* operand_slice0 = candidate->operand(0);
for (int64_t i = 0; i < candidate->operand_count(); ++i) {
const HloInstruction* operand_slice = candidate->operand(i);
if (operand_slice->opcode() != HloOpcode::kSlice ||
operand_slice->operand(0) !=
operation_on_slices->operand(i)->operand(0) ||
!SameSliceConfiguration(operand_slice0, operand_slice)) {
return false;
}
}
return true;
}
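// Profitability heuristic: sinking pays off only when the grouped operations
// collectively produce at least as many elements as one full slice source.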
bool ShouldTransform(const std::vector<HloInstruction*>& operations_on_slices) {
int64_t sum = 0;
for (HloInstruction* user : operations_on_slices) {
sum += ShapeUtil::ElementsIn(user->shape());
}
return sum >= xla::ShapeUtil::ElementsIn(
operations_on_slices[0]->operand(0)->operand(0)->shape());
}
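// Collects the group of elementwise operations that consume similarly
// configured slices of the same slice sources as `operation_on_slices`.
// Returns std::nullopt when the rewrite would not be profitable.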
std::optional<std::vector<HloInstruction*>> FindElementwiseOperationGroup(
const HloInstruction* operation_on_slices) {
std::vector<HloInstruction*> operations;
const HloInstruction* slice_source0 =
operation_on_slices->operand(0)->operand(0);
for (const HloInstruction* operand_slice0 : slice_source0->users()) {
if (operand_slice0->opcode() != HloOpcode::kSlice) {
continue;
}
for (HloInstruction* user : operand_slice0->users()) {
if (IsSimilarOperationOnSlices(operation_on_slices, user)) {
operations.push_back(user);
}
}
}
return ShouldTransform(operations) ? std::make_optional(operations)
: std::nullopt;
}
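// Clones the elementwise operation so that it runs directly on the full slice
// sources, then replaces each original operation-on-slices with a slice of the
// new full-shape result.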
absl::Status SinkSlices(
const std::vector<HloInstruction*>& slice_sources,
const std::vector<HloInstruction*>& operation_on_slices) {
const Shape shape = slice_sources[0]->shape();
PrimitiveType element_type = operation_on_slices[0]->shape().element_type();
Shape new_shape = ShapeUtil::ChangeElementType(shape, element_type);
HloComputation* computation = operation_on_slices[0]->parent();
auto operation_on_slice_sources = computation->AddInstruction(
operation_on_slices[0]->CloneWithNewOperands(new_shape, slice_sources));
VLOG(10) << "Adding operation_on_slice_sources: "
<< operation_on_slice_sources->ToString();
for (HloInstruction* user : operation_on_slices) {
const HloInstruction* operand_slice = user->operand(0);
auto user_slice =
computation->AddInstruction(operand_slice->CloneWithNewOperands(
user->shape(), {operation_on_slice_sources}));
VLOG(10) << "Adding new slice: " << user_slice->ToString()
<< " to replace: " << user->ToString();
TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(user_slice));
}
return absl::OkStatus();
}
}
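// Visits instructions in post order; for each elementwise operation on similar
// slices, finds its peer group and sinks the slices below the operation.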
absl::StatusOr<bool> SliceSinker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (!instruction->IsElementwise() || instruction->operand_count() == 0 ||
instruction->user_count() == 0) {
continue;
}
VLOG(10) << "Processing instruction : " << instruction->ToString();
if (!IsElementwiseOperationOnSimilarSlices(instruction)) {
continue;
}
std::optional<std::vector<HloInstruction*>> similar_operations =
FindElementwiseOperationGroup(instruction);
if (!similar_operations.has_value()) {
continue;
}
std::vector<HloInstruction*> slice_sources;
absl::c_transform(
instruction->operands(), std::back_inserter(slice_sources),
[](HloInstruction* slice) { return slice->mutable_operand(0); });
TF_RETURN_IF_ERROR(SinkSlices(slice_sources, similar_operations.value()));
changed = true;
}
}
return changed;
}
} | #include "xla/service/slice_sinker.h"
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
namespace m = match;
using ::testing::ElementsAre;
class SliceSinkerTest : public HloTestBase {};
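// Two selects on matching slices of the same operands should be rewritten as
// slices of a single full-shape select.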
TEST_F(SliceSinkerTest, TernaryOperation) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = pred[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
p2 = f32[8,9] parameter(2)
s00 = pred[2,9] slice(pred[8,9] p0), slice={[0:2], [0:9]}
s01 = pred[6,9] slice(pred[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
s20 = f32[2,9] slice(f32[8,9] p2), slice={[0:2], [0:9]}
s21 = f32[6,9] slice(f32[8,9] p2), slice={[2:8], [0:9]}
sel0 = f32[2,9] select(pred[2,9] s00, f32[2,9] s10, f32[2,9] s20)
sel1 = f32[6,9] select(pred[6,9] s01, f32[6,9] s11, f32[6,9] s21)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(sel0, sel1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2))),
m::Slice(&slice1, m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, OverlappingPartialSlicesBeneficial) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]}
s02 = f32[8,4] slice(f32[8,9] p0), slice={[0:8], [0:4]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]}
s12 = f32[8,4] slice(f32[8,9] p1), slice={[0:8], [0:4]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
add2 = f32[8,4] add(f32[8,4] s02, f32[8,4] s12)
ROOT tuple = (f32[2,9], f32[5,9], f32[8,4]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 4));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, SameSliceSourcesTwoPeerGroups) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s02 = f32[8,2] slice(f32[8,9] p0), slice={[0:8], [0:2]}
s03 = f32[8,7] slice(f32[8,9] p0), slice={[0:8], [2:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
s12 = f32[8,2] slice(f32[8,9] p1), slice={[0:8], [0:2]}
s13 = f32[8,7] slice(f32[8,9] p1), slice={[0:8], [2:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
mul0 = f32[8,2] multiply(f32[8,2] s02, f32[8,2] s12)
mul1 = f32[8,7] multiply(f32[8,7] s03, f32[8,7] s13)
ROOT tuple = (f32[2,9], f32[6,9], f32[8,2], f32[8,7]) tuple(add0, add1, mul0, mul1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
const HloInstruction* slice3;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 2));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice3->slice_starts(), ElementsAre(0, 2));
EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, OverlappingMultipleSlices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]}
s02 = f32[3,9] slice(f32[8,9] p0), slice={[2:5], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]}
s12 = f32[3,9] slice(f32[8,9] p1), slice={[2:5], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
add2 = f32[3,9] add(f32[3,9] s02, f32[3,9] s12)
ROOT tuple = (f32[2,9], f32[5,9], f32[3,9]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(5, 9));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}
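// The two adds cover only 63 of the 72 source elements, so the rewrite is not
// considered profitable and the module is left unchanged.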
TEST_F(SliceSinkerTest, DisjointedPartialSlices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[2:7], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[2:7], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
ROOT tuple = (f32[2,9], f32[5,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, OverlappingPartialSlicesNotBeneficial) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,7] slice(f32[8,9] p0), slice={[0:2], [0:7]}
s01 = f32[6,7] slice(f32[8,9] p0), slice={[2:8], [0:7]}
s10 = f32[2,7] slice(f32[8,9] p1), slice={[0:2], [0:7]}
s11 = f32[6,7] slice(f32[8,9] p1), slice={[2:8], [0:7]}
add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10)
add1 = f32[6,7] add(f32[6,7] s01, f32[6,7] s11)
ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, DifferentOrderingOfSliceSources) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,7] parameter(0)
p1 = f32[8,7] parameter(1)
s00 = f32[2,7] slice(f32[8,7] p0), slice={[0:2], [0:7]}
s01 = f32[6,7] slice(f32[8,7] p0), slice={[2:8], [0:7]}
s10 = f32[2,7] slice(f32[8,7] p1), slice={[0:2], [0:7]}
s11 = f32[6,7] slice(f32[8,7] p1), slice={[2:8], [0:7]}
add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10)
add1 = f32[6,7] add(f32[6,7] s11, f32[6,7] s01)
ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesFromDifferentIndices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:4], [0:9]}
s01 = f32[4,9] slice(f32[8,9] p0), slice={[4:8], [0:9]}
s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:4], [0:9]}
s11 = f32[4,9] slice(f32[8,9] p1), slice={[4:8], [0:9]}
add0 = f32[4,9] add(f32[4,9] s01, f32[4,9] s10)
add1 = f32[4,9] add(f32[4,9] s00, f32[4,9] s11)
ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, DifferentOperator) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
mul = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10)
add = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(mul, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SameOperatorDifferentAttributes) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
cmp1 = pred[2,9] compare(f32[2,9] s00, f32[2,9] s10), direction=GT
cmp2 = pred[6,9] compare(f32[6,9] s01, f32[6,9] s11), direction=LT
ROOT tuple = (pred[2,9], pred[6,9]) tuple(cmp1, cmp2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesWithMultiUsers) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
mul0 = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10)
mul1 = f32[6,9] multiply(f32[6,9] s01, f32[6,9] s11)
ROOT tuple = (f32[2,9], f32[6,9], f32[2,9], f32[6,9]) tuple(add0, add1, mul0, mul1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
const HloInstruction* slice3;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice3->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, NonElementWise) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8] parameter(0)
s00 = f32[2] slice(f32[8] p0), slice={[0:2]}
s01 = f32[6] slice(f32[8] p0), slice={[2:8]}
bc0 = f32[2,9] broadcast(f32[2] s00), dimensions={0}
bc1 = f32[6,9] broadcast(f32[6] s01), dimensions={0}
ROOT tuple = (f32[2,9], f32[6,9]) tuple(bc0, bc1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesWithNontrivialStrides) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:7:2], [0:9]}
s01 = f32[4,9] slice(f32[8,9] p0), slice={[1:8:2], [0:9]}
s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:7:2], [0:9]}
s11 = f32[4,9] slice(f32[8,9] p1), slice={[1:8:2], [0:9]}
add0 = f32[4,9] add(f32[4,9] s00, f32[4,9] s10)
add1 = f32[4,9] add(f32[4,9] s01, f32[4,9] s11)
ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(7, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(2, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(1, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(2, 1));
}
TEST_F(SliceSinkerTest, NotAllSliceOperand) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[2,9] parameter(1)
p2 = f32[6,9] parameter(2)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
abs0 = f32[2,9] abs(f32[2,9] p1)
abs1 = f32[6,9] abs(f32[6,9] p2)
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] abs0)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
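// Groups can cascade: once the abs group is sunk, the dependent add group is
// rewritten too, leaving slices of add(p0, abs(p1)).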
TEST_F(SliceSinkerTest, Cascade) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
abs0 = f32[2,9] abs(f32[2,9] s10)
abs1 = f32[6,9] abs(f32[6,9] s11)
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] abs0)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Abs(m::Parameter(1)))),
m::Slice(&slice1,
m::Add(m::Parameter(0), m::Abs(m::Parameter(1)))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
}
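// Converts with different result element types are not grouped, since the peer
// check requires matching result element types.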
TEST_F(SliceSinkerTest, SameOpcodeDifferentResultElementTypes) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
convert0 = s32[2,9] convert(f32[2,9] s00)
convert1 = s64[6,9] convert(f32[6,9] s01)
ROOT tuple = (s32[2,9], s64[6,9]) tuple(convert0, convert1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/slice_sinker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/slice_sinker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
89a1a9b3-e32c-4cc4-b376-5c91402185c0 | cpp | tensorflow/tensorflow | reverse_op | tensorflow/compiler/tf2xla/kernels/reverse_op.cc | tensorflow/core/kernels/reverse_op_test.cc | #include <vector>
#include "absl/container/inlined_vector.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
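// Lowers the Reverse op, whose second input is a compile-time constant boolean
// vector with one flag per dimension, to xla::Rev over the flagged dimensions.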
class ReverseOp : public XlaOpKernel {
public:
explicit ReverseOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape x_shape = ctx->InputShape(0);
const TensorShape revd_shape = ctx->InputShape(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(revd_shape),
errors::InvalidArgument("axes must be a vector, not shape ",
revd_shape.DebugString()));
OP_REQUIRES(ctx, revd_shape.num_elements() == x_shape.dims(),
errors::InvalidArgument("axes ", revd_shape.DebugString(),
" must have same number of elements as"
" than input tensor has dimensions ",
x_shape.DebugString(), "."));
if (revd_shape.num_elements() == 0) {
ctx->SetOutput(0, ctx->Input(0));
return;
}
xla::Literal lax;
OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &lax));
std::vector<int64_t> dimensions;
for (int d = 0; d < x_shape.dims(); ++d) {
if (lax.Get<bool>({d})) {
dimensions.push_back(d);
}
}
ctx->SetOutput(0, xla::Rev(ctx->Input(0), dimensions));
}
};
REGISTER_XLA_OP(Name("Reverse").CompileTimeConstantInput("dims"), ReverseOp);
class ReverseV2Op : public XlaOpKernel {
public:
explicit ReverseV2Op(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape x_shape = ctx->InputShape(0);
const TensorShape axes_shape = ctx->InputShape(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(axes_shape),
errors::InvalidArgument("axes must be a vector, not shape ",
axes_shape.DebugString()));
OP_REQUIRES(ctx, axes_shape.num_elements() <= x_shape.dims(),
errors::InvalidArgument("axes ", axes_shape.DebugString(),
" can not have more elements"
" than input tensor has dimensions ",
x_shape.DebugString(), "."));
if (axes_shape.num_elements() == 0) {
ctx->SetOutput(0, ctx->Input(0));
return;
}
std::vector<int64_t> axes;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector(1, &axes));
absl::InlinedVector<bool, 8> witnessed_axes(x_shape.dims(), false);
for (int d = 0; d < axes.size(); ++d) {
OP_REQUIRES(
ctx, (-x_shape.dims() <= axes[d]) && (axes[d] < x_shape.dims()),
errors::InvalidArgument(axes[d], " is out of range [-",
x_shape.dims(), ", ", x_shape.dims(), ")."));
if (axes[d] < 0) {
axes[d] += x_shape.dims();
}
OP_REQUIRES(ctx, !witnessed_axes[axes[d]],
errors::InvalidArgument("canonicalized axis ", axes[d],
" was repeated."));
witnessed_axes[axes[d]] = true;
}
ctx->SetOutput(0, xla::Rev(ctx->Input(0), axes));
}
};
REGISTER_XLA_OP(Name("ReverseV2").CompileTimeConstantInput("axis"),
ReverseV2Op);
}
} | #include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class ReverseOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "Reverse")
.Input(FakeInput(data_type))
.Input(FakeInput())
.Attr("T", data_type)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
template <typename T>
void Reverse_0() {
MakeOp(DataTypeToEnum<T>::value);
AddInputFromArray<T>(TensorShape({}), {3});
AddInputFromArray<bool>(TensorShape({}), {true});
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
Tensor expected(allocator(), DataTypeToEnum<T>::value, TensorShape({}));
expected.scalar<T>() = expected.scalar<T>().constant(3);
test::ExpectTensorEqual<T>(expected, *output);
}
template <typename T>
void Reverse_234() {
MakeOp(DataTypeToEnum<T>::value);
AddInputFromArray<T>(TensorShape({2, 3, 4}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
AddInputFromArray<bool>(TensorShape({3}), {true, false, true});
TF_ASSERT_OK(RunOpKernel());
Tensor* params_tensor = GetOutput(0);
Tensor expected(allocator(), DataTypeToEnum<T>::value,
TensorShape({2, 3, 4}));
test::FillValues<T>(&expected,
{15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20,
3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8});
test::ExpectTensorEqual<T>(expected, *params_tensor);
}
template <typename T>
void Reverse_1234() {
MakeOp(DataTypeToEnum<T>::value);
AddInputFromArray<T>(TensorShape({1, 2, 3, 4}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
AddInputFromArray<bool>(TensorShape({4}), {true, true, false, true});
TF_ASSERT_OK(RunOpKernel());
Tensor* params_tensor = GetOutput(0);
Tensor expected(allocator(), DataTypeToEnum<T>::value,
TensorShape({1, 2, 3, 4}));
test::FillValues<T>(&expected,
{15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20,
3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8});
test::ExpectTensorEqual<T>(expected, *params_tensor);
}
};
TEST_F(ReverseOpTest, Reverse_0_uint8) { Reverse_0<uint8>(); }
TEST_F(ReverseOpTest, Reverse_0_int8) { Reverse_0<int8>(); }
TEST_F(ReverseOpTest, Reverse_0_uint16) { Reverse_0<uint16>(); }
TEST_F(ReverseOpTest, Reverse_0_int16) { Reverse_0<int16>(); }
TEST_F(ReverseOpTest, Reverse_0_float) { Reverse_0<float>(); }
TEST_F(ReverseOpTest, Reverse_0_int32) { Reverse_0<int32>(); }
TEST_F(ReverseOpTest, Reverse_0_int64) { Reverse_0<int64_t>(); }
TEST_F(ReverseOpTest, Reverse_0_double) { Reverse_0<double>(); }
TEST_F(ReverseOpTest, Reverse_0_complex64) { Reverse_0<complex64>(); }
TEST_F(ReverseOpTest, Reverse_0_complex128) { Reverse_0<complex128>(); }
TEST_F(ReverseOpTest, Reverse_234_uint8) { Reverse_234<uint8>(); }
TEST_F(ReverseOpTest, Reverse_234_int8) { Reverse_234<int8>(); }
TEST_F(ReverseOpTest, Reverse_234_uint16) { Reverse_234<uint16>(); }
TEST_F(ReverseOpTest, Reverse_234_int16) { Reverse_234<int16>(); }
TEST_F(ReverseOpTest, Reverse_234_float) { Reverse_234<float>(); }
TEST_F(ReverseOpTest, Reverse_234_int32) { Reverse_234<int32>(); }
TEST_F(ReverseOpTest, Reverse_234_int64) { Reverse_234<int64_t>(); }
TEST_F(ReverseOpTest, Reverse_234_double) { Reverse_234<double>(); }
TEST_F(ReverseOpTest, Reverse_234_complex64) { Reverse_234<complex64>(); }
TEST_F(ReverseOpTest, Reverse_234_complex128) { Reverse_234<complex128>(); }
TEST_F(ReverseOpTest, Reverse_1234_uint8) { Reverse_1234<uint8>(); }
TEST_F(ReverseOpTest, Reverse_1234_int8) { Reverse_1234<int8>(); }
TEST_F(ReverseOpTest, Reverse_1234_uint16) { Reverse_1234<uint16>(); }
TEST_F(ReverseOpTest, Reverse_1234_int16) { Reverse_1234<int16>(); }
TEST_F(ReverseOpTest, Reverse_1234_float) { Reverse_1234<float>(); }
TEST_F(ReverseOpTest, Reverse_1234_int32) { Reverse_1234<int32>(); }
TEST_F(ReverseOpTest, Reverse_1234_int64) { Reverse_1234<int64_t>(); }
TEST_F(ReverseOpTest, Reverse_1234_double) { Reverse_1234<double>(); }
TEST_F(ReverseOpTest, Reverse_1234_complex64) { Reverse_1234<complex64>(); }
TEST_F(ReverseOpTest, Reverse_1234_complex128) { Reverse_1234<complex128>(); }
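// The benchmarks below measure reversing rows (axis 1) of
// {outer, middle, channels} tensors for various channel and thread counts.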
static SessionOptions GetOptions(int intra_threads) {
SessionOptions opts;
opts.config.set_intra_op_parallelism_threads(intra_threads);
opts.config.set_inter_op_parallelism_threads(1);
return opts;
}
template <typename T>
static Graph* Reverse(const TensorShape& shape, int reverse_axis) {
Graph* g = new Graph(OpRegistry::Global());
Tensor data(DataTypeToEnum<T>::value, shape);
data.flat<T>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = reverse_axis;
test::graph::Reverse(g, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
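// Shared benchmark body: reverses axis 1 of an
// {outer_dim, middle_dim, channels} tensor on CPU with the given intra-op
// thread count, and reports items and bytes processed.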
template <typename T>
static void RunReverseRowsBenchmark(::testing::benchmark::State& state,
int outer_dim, int middle_dim,
int intra_threads, int channels) {
SessionOptions opts = GetOptions(intra_threads);
TensorShape shape{outer_dim, middle_dim, channels};
test::Benchmark("cpu", Reverse<T>(shape, 1), &opts, nullptr, nullptr, "",
false)
.Run(state);
const int64_t num_items =
static_cast<int64_t>(state.iterations()) * shape.num_elements();
state.SetItemsProcessed(num_items);
state.SetBytesProcessed(num_items * sizeof(T));
}
void BM_ReverseRowsOf1Channel_1T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_1T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf1Channel_1T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_1T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf1Channel_4T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_4T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf1Channel_4T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/1);
}
BENCHMARK(BM_ReverseRowsOf1Channel_4T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_1T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_1T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_1T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_1T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_4T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_4T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf3Channels_4T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/3);
}
BENCHMARK(BM_ReverseRowsOf3Channels_4T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(30, 30)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_1T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_1T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_1T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/1, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_1T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_4T_float(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_4T_float)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
void BM_ReverseRowsOf4Channels_4T_uint8(::testing::benchmark::State& state) {
const int outer_dim = state.range(0);
const int middle_dim = state.range(1);
RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim,
                                 /*intra_threads=*/4, /*channels=*/4);
}
BENCHMARK(BM_ReverseRowsOf4Channels_4T_uint8)
->UseRealTime()
->ArgPair(288, 288)
->ArgPair(1024, 1024)
->ArgPair(10 * 1024, 1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/reverse_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/reverse_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9aa7f320-8708-44ab-8106-6d86e1782605 | cpp | tensorflow/tensorflow | space_to_batch_converter | third_party/xla/xla/service/space_to_batch_converter.cc | third_party/xla/xla/service/space_to_batch_converter_test.cc | #include "xla/service/space_to_batch_converter.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/algorithm.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/core/bitmap.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
namespace m = match;
constexpr int64_t kNumMappedDims = 3;
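// Rewrites eligible convolutions so that parts of a spatial dimension are
// moved into the batch dimension (space-to-batch), then propagates the
// transformed layout through supported consumers, inserting batch-to-space
// conversions where propagation stops.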
class ConvolutionVisitor {
public:
absl::Status PerformSpaceToBatchOnConvolution(HloInstruction* convolution);
struct ConvDetails {
std::vector<int64_t> spatial_dimensions_to_split;
int64_t inherent_low_padding, inherent_high_padding, stride, spatial_size,
base_dilation_factor, halo_size, high_padding_for_conv,
low_padding_for_conv, kernel_spatial_dim_size, input_dim_size;
};
ConvDetails GetConvolutionDetails(HloInstruction* convolution,
ConvolutionDimensionNumbers& dim_numbers);
std::pair<std::vector<int64_t>, std::vector<int64_t>> GetSpatialDimsToSplit(
HloInstruction* old_operand);
bool IsForwardWindowDilatedConv(HloInstruction* convolution,
ConvolutionDimensionNumbers& dim_numbers);
bool CanPropagate(HloInstruction* consumer, HloInstruction* producer);
bool IsBroadcastTree(HloInstruction* op, HloInstruction* consumer,
std::vector<HloInstruction*>& instructions_to_transform);
void RewriteBroadcastTree(
HloInstruction* producer,
std::vector<HloInstruction*>& instructions_to_transform);
void PropagateOnBroadcast(HloInstruction* consumer, HloInstruction* producer);
bool IsOpcodeNonPropagatable(HloInstruction* consumer);
bool SupportedOpForPropagation(HloInstruction* consumer,
HloInstruction* producer);
bool SupportedDotForPropagation(HloInstruction* consumer,
HloInstruction* producer);
bool IsBroadcastPropagatable(HloInstruction* broadcast,
HloInstruction* old_other_op);
absl::StatusOr<bool> Propagate(HloInstruction* consumer,
HloInstruction* producer);
absl::StatusOr<std::pair<HloInstruction*, std::vector<int64_t>>> SplitSpace(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits,
std::vector<int64_t>* spatial_dimensions_to_split,
bool is_backprop = false, bool is_rhs = false);
absl::StatusOr<HloInstruction*> PerformSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t spatial_split_size,
int64_t num_splits);
absl::StatusOr<HloInstruction*> TransposeAndMergeBatch(
HloInstruction* activations,
absl::Span<const int64_t> final_split_spatial_dim_positioning,
int64_t activations_batch_dim, int64_t old_batch_size);
absl::StatusOr<HloInstruction*> PadAndSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits);
absl::StatusOr<HloInstruction*> PropagateOnConstant(HloInstruction* consumer,
HloInstruction* producer);
absl::Status PropagateOnConv(HloInstruction* convolution);
absl::Status PropagateOnConcat(HloInstruction* concat);
absl::Status PropagateOnReverse(HloInstruction* reverse);
absl::Status PropagateOnPad(HloInstruction* pad);
absl::Status PropagateOnSlice(HloInstruction* slice);
absl::Status PropagateOnBackpropFilterConv(HloInstruction* convolution);
bool IsConvSuitableForSpaceToBatch(HloInstruction* convolution);
bool IsThisBackPropFilterConv(HloInstruction* convolution);
absl::Status PropagateOnUsers(HloInstruction* old_conv);
absl::StatusOr<HloInstruction*> SelectValidPortion(
HloInstruction* new_instr, HloInstruction* old_instr,
HloInstruction* select_val, int64_t new_batch_dim,
absl::Span<const int64_t> new_space_dims, int64_t old_batch_dim,
absl::Span<const int64_t> old_space_dims);
struct SpaceNextToBatchDetails {
HloInstruction* instr;
std::vector<int64_t> transpose_dims;
};
absl::StatusOr<SpaceNextToBatchDetails> BringSpaceNextToBatch(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim,
std::vector<int64_t>* spatial_dimensions_to_split,
bool is_backprop = false, bool is_rhs = false);
absl::StatusOr<HloInstruction*> ChangeSpatialSizeOnSpaceToBatchedShape(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t new_spatial_dim_size, bool increase_spatial_size = false);
absl::StatusOr<HloInstruction*> SplitAndTransposeMergedBatch(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions);
absl::StatusOr<HloInstruction*> BatchToSpace(HloInstruction* old_instr);
absl::StatusOr<HloInstruction*> HaloDuplicateWithSlice(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
HloInstruction* pad_val = nullptr);
absl::StatusOr<bool> Run();
const bool changed() const { return changed_; }
~ConvolutionVisitor() = default;
explicit ConvolutionVisitor(SpaceToBatchController ctrl,
HloComputation* computation);
int64_t GetFirstChosenSpatialDim(HloInstruction* convolution) {
const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
const int64_t end_point = convolution->convolution_dimension_numbers()
.input_spatial_dimensions_size() -
ctrl_.dimension_from_end_to_convert;
return end_point - dim_count + 1;
}
std::vector<int64_t> GetChosenSpatialDims(HloInstruction* convolution) {
const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
std::vector<int64_t> dims(dim_count);
for (int i = 0; i < dim_count; ++i) {
dims[i] =
convolution->convolution_dimension_numbers().input_spatial_dimensions(
first_dim + i);
}
return dims;
}
int64_t DimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) {
return permute_dims[id];
}
int DimMapper(SpaceToBatchDimMap s) { return static_cast<int>(s); }
int64_t ReverseDimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) {
return std::distance(permute_dims.begin(), absl::c_find(permute_dims, id));
}
HloInstruction* DoesConvolutionFeedReduceWindowOrSelectAndScatter(
HloInstruction* instr, int64_t depth);
bool DoesConvolutionFeedUnpropagatableOp(
HloInstruction* instr, int64_t depth = kUnpropagatableOpSearchDepth);
bool IsSpaceToBatchedSpaceSizeSuitable(HloInstruction* instr);
private:
HloComputation* computation_;
absl::flat_hash_set<HloInstruction*> convs_to_visit_;
std::vector<HloInstruction*> conv_visitor_list_;
HloInstructionSet non_propagatable_instrs_;
absl::flat_hash_map<HloInstruction*, HloInstruction*> batch_to_space_map_;
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_instrs_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> instr_to_dim_map_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>>
instr_to_dim_permute_map_;
absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<HloInstruction*>>
broadcast_map_;
bool changed_ = false;
static constexpr int64_t kReduceWindowSearchDepth = 10;
static constexpr int64_t kUnpropagatableOpSearchDepth = 3;
static constexpr int64_t kMultiplierOnSpaceForBaseDilation = 3;
absl::flat_hash_map<std::pair<HloInstruction*, int64_t>, bool>
unpropagatability_cache_;
SpaceToBatchController ctrl_;
};
ConvolutionVisitor::ConvolutionVisitor(SpaceToBatchController ctrl,
HloComputation* computation) {
ctrl_ = ctrl;
computation_ = computation;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kConvolution) {
continue;
}
auto convolution = inst;
if (!IsConvSuitableForSpaceToBatch(convolution)) {
VLOG(1) << "Conv not suitable for space-to-batch "
<< convolution->ToString();
continue;
}
VLOG(1) << "Conv added to space-to-batch worklist "
<< convolution->ToString();
convs_to_visit_.insert(convolution);
conv_visitor_list_.push_back(convolution);
}
}
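// Returns the contiguous spatial dimensions chosen for splitting, both in the
// old instruction's dimension numbering and in the new (permuted)
// instruction's numbering.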
std::pair<std::vector<int64_t>, std::vector<int64_t>>
ConvolutionVisitor::GetSpatialDimsToSplit(HloInstruction* old_operand) {
auto new_operand = old_to_new_instrs_[old_operand];
auto dim_map_val = instr_to_dim_map_[old_operand];
auto permute_dims = instr_to_dim_permute_map_[new_operand];
std::vector<int64_t> old_dims(ctrl_.count_of_dimensions_to_convert),
new_dims(ctrl_.count_of_dimensions_to_convert);
old_dims[0] = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
new_dims[0] = DimLookUp(permute_dims, old_dims[0]);
for (int i = 1; i < ctrl_.count_of_dimensions_to_convert; ++i) {
old_dims[i] = old_dims[0] + i;
new_dims[i] = new_dims[0] + i;
}
return std::make_pair(old_dims, new_dims);
}
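// A window-dilated convolution is treated as a forward convolution when the
// kernel's spatial extent is smaller than the output's.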
bool ConvolutionVisitor::IsForwardWindowDilatedConv(
HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {
const int64_t window_dilation_factor =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation();
if (window_dilation_factor == 1) {
return false;
}
const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
return convolution->operand(1)->shape().dimensions(kernel_spatial_dim) <
convolution->shape().dimensions(output_spatial_dim);
}
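// Eligibility check: requires a choosable spatial dimension, a batch group
// count of 1, supported window/base dilation, an old batch size within the
// configured limit, and a halo that fits within a single spatial split.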
bool ConvolutionVisitor::IsConvSuitableForSpaceToBatch(
HloInstruction* convolution) {
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
if (GetFirstChosenSpatialDim(convolution) < 0) {
return false;
}
if (convolution->batch_group_count() != 1) {
return false;
}
if (convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation() != 1) {
if (!IsForwardWindowDilatedConv(convolution, dim_numbers)) {
return false;
}
}
const ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);
const int64_t low_pad = convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_low();
if (c.base_dilation_factor != 1) {
if (!ctrl_.enable_propagations_on_base_dilations) {
return false;
}
if (c.stride != 1) {
return false;
}
if (low_pad == 0) {
if (c.kernel_spatial_dim_size != 1) {
return false;
}
} else if (low_pad != c.base_dilation_factor - 1 &&
low_pad != c.base_dilation_factor) {
return false;
}
}
int64_t activations_batch_dim = dim_numbers.input_batch_dimension();
const int64_t old_batch_size =
convolution->operand(0)->shape().dimensions(activations_batch_dim);
if (old_batch_size > ctrl_.limit_on_batch_size) {
return false;
}
VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size;
if (c.halo_size > CeilOfRatio(c.spatial_size, ctrl_.number_of_splits)) {
return false;
}
if (c.base_dilation_factor > 1 &&
c.inherent_low_padding == c.base_dilation_factor) {
if (c.spatial_size <
kMultiplierOnSpaceForBaseDilation * ctrl_.number_of_splits) {
return false;
}
}
VLOG(1) << "Legal space-to-batch convolution " << convolution->ToString();
return true;
}
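// Detects backprop-filter convolutions: an operand that was already
// space-to-batched must feed its old batch dimension into the convolution's
// kernel-input-feature (for the kernel) or input-feature (for the activations)
// dimension.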
bool ConvolutionVisitor::IsThisBackPropFilterConv(HloInstruction* convolution) {
auto activations = convolution->mutable_operand(0);
auto kernel = convolution->mutable_operand(1);
auto dim_numbers = convolution->convolution_dimension_numbers();
if (!old_to_new_instrs_.contains(kernel) &&
!old_to_new_instrs_.contains(activations)) {
return false;
}
if (old_to_new_instrs_.contains(kernel)) {
auto dim_map_val_op_0 = instr_to_dim_map_[kernel];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
if (convolution->convolution_dimension_numbers()
.kernel_input_feature_dimension() != old_batch_dim) {
return false;
}
}
if (old_to_new_instrs_.contains(activations)) {
auto dim_map_val_op_0 = instr_to_dim_map_[activations];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
if (dim_numbers.input_feature_dimension() != old_batch_dim) {
return false;
}
}
return true;
}
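// Rebuilds halo regions across the split spatial dimension: data required from
// a neighboring split (the low-padding region from the previous slice and the
// halo from the next) is sliced out, shifted across the split-batch dimension
// via padding, and concatenated back onto each split.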
absl::StatusOr<HloInstruction*> ConvolutionVisitor::HaloDuplicateWithSlice(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
HloInstruction* pad_val) {
const int64_t spatial_dim_count = spatial_dimensions_to_split.size();
const int64_t additional_batch_size =
IPow<int64_t>(ctrl_.number_of_splits, spatial_dim_count);
const int64_t original_batch_size =
activations->shape().dimensions(activations_batch_dim) /
additional_batch_size;
const int64_t spatial_split_size =
activations->shape().dimensions(spatial_dimensions_to_split[0]);
const int64_t batch_size = ctrl_.number_of_splits;
TF_ASSIGN_OR_RETURN(
activations, SplitAndTransposeMergedBatch(
activations, activations_batch_dim, original_batch_size,
spatial_dimensions_to_split));
const int64_t rank = activations->shape().rank();
VLOG(1) << "In HaloDuplicateWithSlice with activations "
<< activations->ToString() << " batch_size " << batch_size
<< " spatial_split_size " << spatial_split_size << " low_padding "
<< low_padding << " halo size " << halo_size;
CHECK_LE(std::abs(halo_size - low_padding), spatial_split_size);
for (int64_t i = 0; i < spatial_dimensions_to_split.size(); ++i) {
int64_t spatial_dimension_to_split = activations_batch_dim + 2 * (i + 1);
int64_t remapped_batch_dimension = spatial_dimension_to_split - 1;
HloInstruction* first_slice = nullptr;
std::vector<int64_t> strides(rank, 1);
HloInstruction* padding =
pad_val == nullptr
? activations->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(activations->shape().element_type())))
: pad_val;
if (low_padding > 0) {
std::vector<int64_t> start_indices(rank, 0),
end_indices(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
start_indices[spatial_dimension_to_split] =
spatial_split_size - low_padding;
end_indices[remapped_batch_dimension] = batch_size - 1;
end_indices[spatial_dimension_to_split] = spatial_split_size;
TF_ASSIGN_OR_RETURN(first_slice,
MakeSliceHlo(activations, start_indices, end_indices,
strides, &activations->metadata(),
&activations->frontend_attributes()));
VLOG(1) << "first slice " << first_slice->ToString();
PaddingConfig padding_config =
MakeNoPaddingConfig(first_slice->shape().dimensions_size());
padding_config.mutable_dimensions(remapped_batch_dimension)
->set_edge_padding_low(1);
TF_ASSIGN_OR_RETURN(first_slice,
MakePadHlo(first_slice, padding, padding_config,
&first_slice->metadata(),
&first_slice->frontend_attributes()));
}
HloInstruction* halo_region = nullptr;
if (halo_size - low_padding > 0) {
std::vector<int64_t> start_indices_halo(rank, 0),
end_indices_halo(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
start_indices_halo[remapped_batch_dimension] = 1;
end_indices_halo[spatial_dimension_to_split] = halo_size - low_padding;
TF_ASSIGN_OR_RETURN(
halo_region,
MakeSliceHlo(activations, start_indices_halo, end_indices_halo,
strides, &activations->metadata(),
&activations->frontend_attributes()));
VLOG(1) << "halo_region " << halo_region->ToString();
PaddingConfig padding_config_halo =
MakeNoPaddingConfig(halo_region->shape().dimensions_size());
padding_config_halo.mutable_dimensions(remapped_batch_dimension)
->set_edge_padding_high(1);
TF_ASSIGN_OR_RETURN(halo_region,
MakePadHlo(halo_region, padding, padding_config_halo,
&halo_region->metadata(),
&halo_region->frontend_attributes()));
}
if ((halo_size == 0 && low_padding != 0) || low_padding < 0) {
std::vector<int64_t> start_indices_activations_cut(rank, 0),
end_indices_activations_cut(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
if (low_padding > 0) {
end_indices_activations_cut[spatial_dimension_to_split] =
spatial_split_size - low_padding;
} else {
start_indices_activations_cut[spatial_dimension_to_split] =
0 - low_padding;
end_indices_activations_cut[spatial_dimension_to_split] =
spatial_split_size;
}
TF_ASSIGN_OR_RETURN(
activations, MakeSliceHlo(activations, start_indices_activations_cut,
end_indices_activations_cut, strides,
&activations->metadata(),
&activations->frontend_attributes()));
}
if (first_slice != nullptr) {
TF_ASSIGN_OR_RETURN(
activations,
MakeConcatHlo({first_slice, activations}, spatial_dimension_to_split,
&activations->metadata(),
&activations->frontend_attributes()));
}
if (halo_region != nullptr) {
TF_ASSIGN_OR_RETURN(
activations,
MakeConcatHlo({activations, halo_region}, spatial_dimension_to_split,
&activations->metadata(),
&activations->frontend_attributes()));
}
}
TF_ASSIGN_OR_RETURN(
activations,
TransposeAndMergeBatch(
activations,
spatial_dimensions_to_split,
activations_batch_dim, original_batch_size));
VLOG(1) << "HaloDuplicated activations " << activations->ToString();
return activations;
}
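// Ensures the (contiguous) spatial dimensions to split sit immediately after
// the batch dimension, inserting a transpose when they do not already. Updates
// `dim_numbers`, `activations_batch_dim` and `spatial_dimensions_to_split` in
// place and returns the new activations along with the permutation applied.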
absl::StatusOr<ConvolutionVisitor::SpaceNextToBatchDetails>
ConvolutionVisitor::BringSpaceNextToBatch(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim,
std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop,
bool is_rhs) {
for (int64_t i = 1; i < spatial_dimensions_to_split->size(); ++i) {
CHECK_EQ(spatial_dimensions_to_split->at(i),
spatial_dimensions_to_split->at(i - 1) + 1)
<< "Spatial dimensions are not contiguous";
}
int64_t spatial_dimension_to_split = spatial_dimensions_to_split->at(0);
std::vector<int64_t> transpose_dims(activations->shape().rank());
if (spatial_dimension_to_split == activations_batch_dim + 1) {
absl::c_iota(transpose_dims, 0);
} else {
ConvolutionDimensionNumbers new_dim_numbers = dim_numbers;
int64_t pushed_counter = 0;
int64_t new_batch_dim, new_spatial_dim;
int64_t dim_counter = 0;
if (is_rhs) {
CHECK(is_backprop);
for (int i = 0; i < activations->shape().rank(); ++i) {
if (i == activations_batch_dim) {
continue;
}
if (i == spatial_dimension_to_split) {
transpose_dims[dim_counter++] = activations_batch_dim;
new_batch_dim = pushed_counter;
pushed_counter++;
new_spatial_dim = pushed_counter;
}
if (i == dim_numbers.kernel_output_feature_dimension()) {
new_dim_numbers.set_kernel_output_feature_dimension(pushed_counter);
} else {
auto it = absl::c_find(dim_numbers.kernel_spatial_dimensions(), i);
if (it != dim_numbers.kernel_spatial_dimensions().end()) {
int64_t j = it - dim_numbers.kernel_spatial_dimensions().begin();
new_dim_numbers.set_kernel_spatial_dimensions(j, pushed_counter);
}
}
transpose_dims[dim_counter++] = i;
pushed_counter++;
}
activations_batch_dim = new_batch_dim;
spatial_dimension_to_split = new_spatial_dim;
TF_ASSIGN_OR_RETURN(activations,
MakeTransposeHlo(activations, transpose_dims));
new_dim_numbers.set_kernel_input_feature_dimension(activations_batch_dim);
} else {
for (int i = 0; i < activations->shape().rank(); ++i) {
if (i == activations_batch_dim) {
continue;
}
if (i == spatial_dimension_to_split) {
transpose_dims[dim_counter++] = activations_batch_dim;
new_batch_dim = pushed_counter;
pushed_counter++;
new_spatial_dim = pushed_counter;
}
if (is_backprop && i == dim_numbers.input_batch_dimension()) {
new_dim_numbers.set_input_batch_dimension(pushed_counter);
} else if (i == dim_numbers.input_feature_dimension()) {
new_dim_numbers.set_input_feature_dimension(pushed_counter);
} else {
auto it = absl::c_find(dim_numbers.input_spatial_dimensions(), i);
if (it != dim_numbers.input_spatial_dimensions().end()) {
int64_t j = it - dim_numbers.input_spatial_dimensions().begin();
new_dim_numbers.set_input_spatial_dimensions(j, pushed_counter);
}
}
transpose_dims[dim_counter++] = i;
pushed_counter++;
}
activations_batch_dim = new_batch_dim;
spatial_dimension_to_split = new_spatial_dim;
TF_ASSIGN_OR_RETURN(activations,
MakeTransposeHlo(activations, transpose_dims));
if (is_backprop) {
new_dim_numbers.set_input_feature_dimension(activations_batch_dim);
} else {
new_dim_numbers.set_input_batch_dimension(activations_batch_dim);
}
}
dim_numbers = new_dim_numbers;
}
for (int64_t i = 0; i < spatial_dimensions_to_split->size(); ++i) {
(*spatial_dimensions_to_split)[i] = spatial_dimension_to_split + i;
}
return SpaceNextToBatchDetails{activations, transpose_dims};
}
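// Unpacks the merged batch dimension of space-to-batched activations into
// [old_batch, splits, splits, ...]; with more than one split spatial
// dimension, a transpose then places each split factor next to the spatial
// dimension it came from.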
absl::StatusOr<HloInstruction*>
ConvolutionVisitor::SplitAndTransposeMergedBatch(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions) {
CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);
std::vector<int64_t> new_dimensions(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
const int64_t new_batch_size =
activations->shape().dimensions(batch_dimension);
VLOG(3) << "Decreasing the spatial size while propagating new_batch_size "
<< new_batch_size << " old_batch_size " << old_batch_size;
new_dimensions[batch_dimension] = old_batch_size;
const int64_t spatial_dim_count = spatial_dimensions.size();
for (int64_t i = 0; i < spatial_dim_count; ++i) {
new_dimensions.insert(new_dimensions.begin() + spatial_dimensions[0],
ctrl_.number_of_splits);
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_split_activations,
MakeReshapeHlo(new_dimensions, activations));
if (spatial_dim_count > 1) {
std::vector<int64_t> transpose_dims(new_dimensions.size());
absl::c_iota(transpose_dims, 0);
int64_t start_batch_dim_position = batch_dimension + 1;
int64_t start_space_dim_position = batch_dimension + 2;
for (int i = 0; i < spatial_dim_count; ++i) {
transpose_dims[start_batch_dim_position + 2 * i] =
batch_dimension + spatial_dim_count - i;
transpose_dims[start_space_dim_position + 2 * i] =
batch_dimension + spatial_dim_count + 1 + i;
}
TF_ASSIGN_OR_RETURN(
batch_split_activations,
MakeTransposeHlo(batch_split_activations, transpose_dims));
}
return batch_split_activations;
}
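// Resizes the per-split spatial extent of space-to-batched activations to
// `new_spatial_dim_size`: the split pieces are first collapsed back into full
// spatial dimensions, grown with zero padding or shrunk with a slice, and
// finally re-split via PerformSplitSpace.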
absl::StatusOr<HloInstruction*>
ConvolutionVisitor::ChangeSpatialSizeOnSpaceToBatchedShape(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions,
int64_t new_spatial_dim_size, bool increase_spatial_size) {
CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);
std::vector<int64_t> new_dimensions(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
const int64_t spatial_dim_count = spatial_dimensions.size();
const int64_t spatial_dim_size =
activations->shape().dimensions(spatial_dimensions[0]);
const int64_t reshaped_space_size = spatial_dim_size * ctrl_.number_of_splits;
TF_ASSIGN_OR_RETURN(
HloInstruction * batch_split_activations,
SplitAndTransposeMergedBatch(activations, batch_dimension, old_batch_size,
spatial_dimensions));
std::vector<int64_t> batch_space_collapse_reshape_dims(
batch_split_activations->shape().dimensions().begin(),
batch_split_activations->shape().dimensions().end());
batch_space_collapse_reshape_dims.erase(
batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0],
batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0] +
spatial_dim_count);
for (auto spatial_dimension : spatial_dimensions) {
batch_space_collapse_reshape_dims[spatial_dimension] = reshaped_space_size;
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_space_collapsed_reshape,
MakeReshapeHlo(batch_space_collapse_reshape_dims,
batch_split_activations));
VLOG(3) << "First reshape done";
const int64_t rank = activations->shape().rank();
if (increase_spatial_size) {
PaddingConfig padding_config = MakeNoPaddingConfig(
batch_space_collapsed_reshape->shape().dimensions_size());
for (auto spatial_dimension : spatial_dimensions) {
padding_config.mutable_dimensions(spatial_dimension)
->set_edge_padding_high(new_spatial_dim_size *
ctrl_.number_of_splits -
reshaped_space_size);
padding_config.mutable_dimensions(spatial_dimension)
->set_edge_padding_low(0);
}
HloInstruction* padding = activations->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(
batch_space_collapsed_reshape->shape().element_type())));
TF_ASSIGN_OR_RETURN(
batch_space_collapsed_reshape,
MakePadHlo(batch_space_collapsed_reshape, padding, padding_config,
&batch_space_collapsed_reshape->metadata(),
&batch_space_collapsed_reshape->frontend_attributes()));
} else {
std::vector<int64_t> start_indices(rank, 0),
end_indices(batch_space_collapsed_reshape->shape().dimensions().begin(),
batch_space_collapsed_reshape->shape().dimensions().end()),
strides(rank, 1);
for (auto spatial_dimension : spatial_dimensions) {
end_indices[spatial_dimension] =
new_spatial_dim_size * ctrl_.number_of_splits;
}
TF_ASSIGN_OR_RETURN(
batch_space_collapsed_reshape,
MakeSliceHlo(batch_space_collapsed_reshape, start_indices, end_indices,
strides, &batch_space_collapsed_reshape->metadata(),
&batch_space_collapsed_reshape->frontend_attributes()));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * activations_new,
PerformSplitSpace(batch_space_collapsed_reshape, spatial_dimensions,
batch_dimension, new_spatial_dim_size,
ctrl_.number_of_splits));
VLOG(3) << "Size decreased activations " << activations_new->ToString();
return activations_new;
}
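// Entry point of the visitor: space-to-batches every convolution that is
// still worth converting, then sweeps the instructions that could not be
// rewritten directly, either propagating the converted form into them or
// rewriting their operands back with BatchToSpace.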
absl::StatusOr<bool> ConvolutionVisitor::Run() {
for (auto conv : conv_visitor_list_) {
if (ctrl_.disable_starting_on_small_chains &&
DoesConvolutionFeedUnpropagatableOp(conv)) {
VLOG(1) << "Giving up on conv " << conv->ToString()
<< " because it feeds an unpropagatable op";
convs_to_visit_.erase(conv);
}
if (convs_to_visit_.count(conv) > 0) {
TF_CHECK_OK(PerformSpaceToBatchOnConvolution(conv));
changed_ = true;
}
}
conv_visitor_list_.clear();
convs_to_visit_.clear();
for (auto instr : non_propagatable_instrs_) {
if (instr->opcode() == HloOpcode::kConvolution) {
VLOG(1) << "Instr " << instr->ToString();
}
if (instr->opcode() == HloOpcode::kConvolution &&
!IsConvSuitableForSpaceToBatch(instr)) {
HloInstruction* producer = nullptr;
if (old_to_new_instrs_.contains(instr->mutable_operand(0))) {
producer = instr->mutable_operand(0);
} else if (old_to_new_instrs_.contains(instr->mutable_operand(1))) {
producer = instr->mutable_operand(1);
}
if (producer) {
if (CanPropagate(instr, producer)) {
bool needs_further_propagation;
TF_ASSIGN_OR_RETURN(needs_further_propagation,
Propagate(instr, producer));
TF_CHECK_OK(computation_->ReplaceInstruction(
instr, old_to_new_instrs_[instr]));
continue;
}
}
}
VLOG(1) << "Could not eventually propagate through " << instr->ToString();
absl::flat_hash_map<int64_t, HloInstruction*> operand_map;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (old_to_new_instrs_.count(instr->mutable_operand(i))) {
TF_ASSIGN_OR_RETURN(operand_map[i],
BatchToSpace(instr->mutable_operand(i)));
}
}
for (auto entry : operand_map) {
TF_CHECK_OK(instr->ReplaceOperandWith(entry.first, entry.second));
}
}
non_propagatable_instrs_.clear();
return changed_;
}
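// Elementwise ops that can be propagated through by rewriting operands only.
// Fusions, RNGs, copies, constants, iotas and maps are explicitly excluded.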
bool IsTrivialElementwise(HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kFusion || hlo->opcode() == HloOpcode::kRng ||
hlo->opcode() == HloOpcode::kCopy ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kIota || hlo->opcode() == HloOpcode::kMap) {
return false;
}
return hlo->IsElementwise();
}
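// Checks whether the space-to-batched form of `producer` can be propagated
// into `consumer`. Each supported opcode gets its own tests: elementwise ops
// need operands with matching batch/space layouts, concatenates need
// identical permutations and shapes, convolutions (including backprop-filter
// convs) need compatible dimension numbers and batch sizes, and
// reduce-window/select-and-scatter need windows this pass can rewrite.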
bool ConvolutionVisitor::CanPropagate(HloInstruction* consumer,
HloInstruction* producer) {
if (IsTrivialElementwise(consumer)) {
VLOG(2) << "Doing propagation check on elementwise op: "
<< consumer->ToString();
HloInstruction* pivot_operand = nullptr;
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
auto old_producer = consumer->mutable_operand(i);
std::vector<HloInstruction*> to_transform;
const bool broadcast_or_constant =
(old_producer->opcode() == HloOpcode::kConstant) ||
(old_producer->opcode() == HloOpcode::kBroadcast &&
IsBroadcastPropagatable(old_producer, producer)) ||
(consumer->IsElementwiseBinary() &&
old_producer->opcode() == HloOpcode::kBroadcast &&
IsBroadcastTree(old_producer, producer, to_transform));
if (!old_to_new_instrs_.contains(old_producer) &&
!broadcast_or_constant) {
VLOG(1) << "Cannot propagate on elementwise op " << consumer->ToString()
<< " because operand " << old_producer->ToString()
<< " isn't ready ";
return false;
} else {
if (broadcast_or_constant) {
VLOG(2) << "Skipping on " << old_producer->ToString();
continue;
}
CHECK(old_to_new_instrs_.contains(old_producer));
CHECK(instr_to_dim_map_.contains(old_producer));
if (pivot_operand == nullptr) {
pivot_operand = old_producer;
VLOG(2) << "Elementwise op: pivot " << old_producer->ToString();
} else {
if (instr_to_dim_map_[pivot_operand]
[DimMapper(SpaceToBatchDimMap::kBatch)] !=
instr_to_dim_map_[old_producer]
[DimMapper(SpaceToBatchDimMap::kBatch)] ||
instr_to_dim_map_[pivot_operand]
[DimMapper(SpaceToBatchDimMap::kSpace0)] !=
instr_to_dim_map_[old_producer]
[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
VLOG(2) << "Elementwise op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to changed batch space ordering ";
return false;
}
auto pivot_new_instr = old_to_new_instrs_[pivot_operand];
auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr];
auto new_instr = old_to_new_instrs_[old_producer];
auto permute_dims = instr_to_dim_permute_map_[new_instr];
for (int j = 0; j < pivot_permute_dims.size(); ++j) {
if (pivot_permute_dims[j] != permute_dims[j]) {
VLOG(2) << "Elementwise op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to permuted dimensions ";
return false;
}
if (pivot_new_instr->shape().dimensions(j) !=
new_instr->shape().dimensions(j)) {
if (!((consumer->IsElementwiseBinary() ||
consumer->opcode() == HloOpcode::kSelect) &&
j == instr_to_dim_map_[pivot_operand][DimMapper(
SpaceToBatchDimMap::kSpace0)])) {
VLOG(2) << "Elementwise op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to changed shape sizes ";
return false;
}
}
}
}
}
}
}
if (consumer->opcode() == HloOpcode::kConcatenate) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (!instr_to_dim_map_.contains(consumer->mutable_operand(i))) {
return false;
}
}
auto pivot_operand = consumer->mutable_operand(0);
auto pivot_new_instr = old_to_new_instrs_[pivot_operand];
auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr];
for (int64_t i = 1; i < consumer->operand_count(); ++i) {
auto new_instr = old_to_new_instrs_[consumer->mutable_operand(i)];
auto permute_dims = instr_to_dim_permute_map_[new_instr];
for (int j = 0; j < pivot_permute_dims.size(); ++j) {
if (pivot_permute_dims[j] != permute_dims[j]) {
VLOG(2) << "Concat op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to permuted dimensions ";
return false;
}
if (pivot_new_instr->shape().dimensions(j) !=
new_instr->shape().dimensions(j)) {
VLOG(2) << "Concat op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to changed shape sizes ";
return false;
}
}
}
return true;
}
if (consumer->opcode() == HloOpcode::kConvolution) {
if (!ConsumeFuel("space-to-batch-converter", [&] {
return "Skipping space-to-batch propagation because fuel over\n";
})) {
return false;
}
auto are_conv_dims_compatible =
[&](const ConvolutionDimensionNumbers dim_numbers,
std::vector<int64_t>& dim_map, bool check_lhs) {
if (check_lhs) {
if (dim_numbers.input_spatial_dimensions(
GetFirstChosenSpatialDim(consumer)) !=
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
return false;
}
for (int i = 0; i < dim_numbers.input_spatial_dimensions().size();
++i) {
if (dim_numbers.input_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] ||
dim_numbers.input_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) {
return false;
}
}
} else {
if (dim_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(consumer)) !=
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
return false;
}
for (int i = 0; i < dim_numbers.kernel_spatial_dimensions().size();
++i) {
if (dim_numbers.kernel_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] ||
dim_numbers.kernel_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) {
return false;
}
}
}
return true;
};
VLOG(1) << "Checking if conv is supported for propagation "
<< consumer->ToString();
bool found_good_non_window_dilated_conv = true;
if (IsConvSuitableForSpaceToBatch(consumer)) {
if (!old_to_new_instrs_.contains(consumer->mutable_operand(0))) {
found_good_non_window_dilated_conv = false;
}
ConvolutionDimensionNumbers dim_numbers =
consumer->convolution_dimension_numbers();
ConvDetails c = GetConvolutionDetails(consumer, dim_numbers);
auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));
std::vector<int64_t> new_spatial_dims = retval.second;
auto new_activations = old_to_new_instrs_[consumer->mutable_operand(0)];
if (new_activations->shape().dimensions(retval.second[0]) <
c.inherent_low_padding) {
return false;
}
auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)];
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_0, true)) {
found_good_non_window_dilated_conv = false;
}
if (consumer->convolution_dimension_numbers().input_batch_dimension() !=
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]) {
found_good_non_window_dilated_conv = false;
}
if (found_good_non_window_dilated_conv) {
return true;
}
}
if (!ctrl_.enable_propagations_on_window_dilations) {
return false;
}
if (!IsThisBackPropFilterConv(consumer)) {
return false;
}
if (GetFirstChosenSpatialDim(consumer) < 0) {
return false;
}
if (consumer->window()
.dimensions(GetFirstChosenSpatialDim(consumer))
.stride() != 1) {
return false;
}
if (consumer->feature_group_count() != 1) {
return false;
}
VLOG(2) << "Checking for backprop filter conv propagatability";
CHECK_EQ(consumer->operand_count(), 2);
auto activations = consumer->mutable_operand(0);
auto kernel = consumer->mutable_operand(1);
auto win_dims =
consumer->window().dimensions(GetFirstChosenSpatialDim(consumer));
const int64_t rhs_dilation = win_dims.window_dilation();
const int64_t lhs_dilation = win_dims.base_dilation();
if (lhs_dilation != 1) {
return false;
}
if (rhs_dilation == 1 &&
!ctrl_.enable_propagations_on_trivial_window_dilations) {
if (!old_to_new_instrs_.contains(kernel) ||
!old_to_new_instrs_.contains(activations)) {
return false;
}
}
if (!old_to_new_instrs_.contains(kernel)) {
const int64_t rhs_batch =
kernel->shape().dimensions(consumer->convolution_dimension_numbers()
.kernel_input_feature_dimension());
auto dim_map_val_op_0 = instr_to_dim_map_[activations];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto first_operand = old_to_new_instrs_[activations];
auto permute_dims_first_operand =
instr_to_dim_permute_map_[first_operand];
const int64_t new_batch_dim =
DimLookUp(permute_dims_first_operand, old_batch_dim);
const int64_t new_space_dim =
DimLookUp(permute_dims_first_operand, old_space_dim);
const int64_t lhs_batch =
first_operand->shape().dimensions(new_batch_dim);
if (first_operand->shape().dimensions(new_space_dim) % rhs_dilation !=
0) {
return false;
}
if (rhs_batch * ctrl_.number_of_splits != lhs_batch) {
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_0, true)) {
return false;
}
      VLOG(2)
          << "Backprop filter conv ready for propagation: activations ready, "
             "kernel will be space-to-batched";
return true;
}
if (!old_to_new_instrs_.contains(activations)) {
const int64_t lhs_batch = activations->shape().dimensions(
consumer->convolution_dimension_numbers().input_feature_dimension());
auto dim_map_val_op_1 = instr_to_dim_map_[consumer->mutable_operand(1)];
const int64_t old_batch_dim =
dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)];
auto second_operand = old_to_new_instrs_[kernel];
auto permute_dims_second_operand =
instr_to_dim_permute_map_[second_operand];
const int64_t new_batch_dim =
DimLookUp(permute_dims_second_operand, old_batch_dim);
const int64_t rhs_batch =
second_operand->shape().dimensions(new_batch_dim);
if (rhs_batch != ctrl_.number_of_splits * lhs_batch) {
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_1, false)) {
return false;
}
VLOG(2) << "Backprop filter conv ready for propagation: kernel ready, "
" activations will be space-to-batched";
return true;
}
auto first_operand = old_to_new_instrs_[activations];
auto dim_map_val_op_0 = instr_to_dim_map_[activations];
auto second_operand = old_to_new_instrs_[kernel];
auto dim_map_val_op_1 = instr_to_dim_map_[kernel];
auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
auto permute_dims_second_operand =
instr_to_dim_permute_map_[second_operand];
const int64_t new_batch_dim_operand_0 =
DimLookUp(permute_dims_first_operand,
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]);
const int64_t new_space_dim_operand_0 =
DimLookUp(permute_dims_first_operand,
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)]);
const int64_t new_batch_dim_operand_1 =
DimLookUp(permute_dims_second_operand,
dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)]);
const int64_t new_space_dim_operand_1 =
DimLookUp(permute_dims_second_operand,
dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kSpace0)]);
if (first_operand->shape().dimensions(new_batch_dim_operand_0) !=
second_operand->shape().dimensions(new_batch_dim_operand_1)) {
VLOG(2) << "Backprop filter conv not ready for propagation because batch "
"dimensions don't line up";
return false;
}
if (first_operand->shape().dimensions(new_space_dim_operand_0) >
rhs_dilation *
second_operand->shape().dimensions(new_space_dim_operand_1)) {
VLOG(2) << "Backprop filter conv not ready for propagation because of "
"dilation factor mismatch";
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_0, true)) {
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_1, false)) {
return false;
}
VLOG(2) << "Backprop filter conv ready for propagation";
return true;
}
if (consumer->opcode() == HloOpcode::kReduceWindow ||
consumer->opcode() == HloOpcode::kReduce) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
auto old_producer = consumer->mutable_operand(i);
if (i == 0 && !old_to_new_instrs_.contains(old_producer)) {
return false;
}
}
if (consumer->opcode() == HloOpcode::kReduceWindow) {
return IsSpaceToBatchedSpaceSizeSuitable(consumer);
}
}
if (consumer->opcode() == HloOpcode::kSelectAndScatter) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
auto old_producer = consumer->mutable_operand(i);
if (i < 2 && !old_to_new_instrs_.contains(old_producer)) {
return false;
}
}
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)];
auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)];
auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
auto permute_dims_second_operand =
instr_to_dim_permute_map_[second_operand];
if (permute_dims_first_operand != permute_dims_second_operand) {
VLOG(2) << "Can't propagate through select and scatter due to "
"permutation mismatch";
return false;
}
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t new_batch_dim =
DimLookUp(permute_dims_first_operand, old_batch_dim);
const int64_t new_space_dim =
DimLookUp(permute_dims_first_operand, old_space_dim);
if (first_operand->shape().dimensions(new_batch_dim) !=
second_operand->shape().dimensions(new_batch_dim)) {
VLOG(2)
<< "Can't propagate through select and scatter due to dim mismatch";
return false;
}
const int64_t stride =
consumer->window().dimensions(old_space_dim).stride();
const int64_t pad_high =
consumer->window().dimensions(old_space_dim).padding_high();
const int64_t pad_low =
consumer->window().dimensions(old_space_dim).padding_low();
if ((first_operand->shape().dimensions(new_space_dim) + pad_high +
pad_low) /
stride !=
second_operand->shape().dimensions(new_space_dim)) {
VLOG(2) << "Can't propagate through select and scatter due to stride "
"mismatch";
return false;
}
return IsSpaceToBatchedSpaceSizeSuitable(consumer);
}
return true;
}
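// Builds (and caches in `broadcast_map_`) a replacement broadcast whose shape
// is compatible with the space-to-batched producer; when the batch dimension
// itself was broadcast, an extra reshape re-merges the batch.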
void ConvolutionVisitor::PropagateOnBroadcast(HloInstruction* consumer,
HloInstruction* producer) {
auto new_producer = old_to_new_instrs_[producer];
auto permute_dims = instr_to_dim_permute_map_[new_producer];
auto dim_map_val = instr_to_dim_map_[producer];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto orig_broadcast_dims = consumer->dimensions();
bool batch_is_broadcasted =
absl::c_linear_search(orig_broadcast_dims, old_batch_dim);
const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim);
bool map_found = broadcast_map_.contains(consumer);
if (map_found) {
for (auto previous_broadcast : broadcast_map_[consumer]) {
if (ShapeUtil::CompatibleIgnoringElementType(previous_broadcast->shape(),
new_producer->shape())) {
return;
}
}
}
std::vector<int64_t> final_shape_dims(
new_producer->shape().dimensions().begin(),
new_producer->shape().dimensions().end());
if (batch_is_broadcasted) {
final_shape_dims[new_batch_dim] =
producer->shape().dimensions(old_batch_dim);
final_shape_dims[new_space_dim] *= ctrl_.number_of_splits;
}
std::vector<int64_t> broadcast_dims;
const auto& dimensions = consumer->dimensions();
broadcast_dims.reserve(dimensions.size());
for (auto j : dimensions) {
broadcast_dims.push_back(DimLookUp(permute_dims, j));
}
auto new_broadcast = MakeBroadcastHlo(
consumer->mutable_operand(0), broadcast_dims, final_shape_dims,
&consumer->metadata(), &consumer->frontend_attributes());
VLOG(1) << "Created broadcast " << new_broadcast->ToString();
if (batch_is_broadcasted) {
new_broadcast =
MakeReshapeHlo(new_producer->shape().dimensions(), new_broadcast)
.value();
VLOG(2) << "Created reshape of broadcast " << new_broadcast->ToString();
}
if (!map_found) {
absl::flat_hash_set<HloInstruction*> set_of_broadcasts;
broadcast_map_[consumer] = set_of_broadcasts;
}
broadcast_map_[consumer].insert(new_broadcast);
}
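// Rewrites every node collected by IsBroadcastTree: broadcasts via
// PropagateOnBroadcast, trivial elementwise ops via Propagate.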
void ConvolutionVisitor::RewriteBroadcastTree(
HloInstruction* producer,
std::vector<HloInstruction*>& instructions_to_transform) {
CHECK(old_to_new_instrs_.contains(producer));
for (auto instr : instructions_to_transform) {
if (instr->opcode() == HloOpcode::kBroadcast) {
PropagateOnBroadcast(instr, producer);
} else if (IsTrivialElementwise(instr)) {
Propagate(instr, instr->mutable_operand(0)).value();
} else {
LOG(FATAL) << "Unsupported opcode in RewriteBroadcastTree";
}
}
}
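// Returns true if `op` is a tree built only from propagatable broadcasts,
// scalar constants and trivial elementwise ops; the nodes that will need
// rewriting are appended to `instructions_to_transform` in post-order.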
bool ConvolutionVisitor::IsBroadcastTree(
HloInstruction* op, HloInstruction* consumer,
std::vector<HloInstruction*>& instructions_to_transform) {
if (op->opcode() == HloOpcode::kBroadcast) {
if (IsBroadcastPropagatable(op, consumer)) {
instructions_to_transform.push_back(op);
return true;
} else {
return false;
}
}
if (Match(op, m::ConstantScalar())) {
return true;
}
if (!IsTrivialElementwise(op)) {
return false;
}
for (int64_t i = 0; i < op->operand_count(); ++i) {
if (!IsBroadcastTree(op->mutable_operand(i), consumer,
instructions_to_transform)) {
return false;
}
}
instructions_to_transform.push_back(op);
return true;
}
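// A broadcast can be propagated through only if its operand does not feed the
// space dimension of `old_other_op`; data laid out along that dimension would
// be rearranged by the space-to-batch split.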
bool ConvolutionVisitor::IsBroadcastPropagatable(HloInstruction* broadcast,
HloInstruction* old_other_op) {
CHECK_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
CHECK(instr_to_dim_map_.contains(old_other_op));
auto result = instr_to_dim_map_[old_other_op];
const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto broadcast_dims = broadcast->dimensions();
return !absl::c_linear_search(broadcast_dims, space_dim);
}
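// Opcodes we never try to propagate through; currently just custom calls,
// whose semantics are opaque to this pass.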
bool ConvolutionVisitor::IsOpcodeNonPropagatable(HloInstruction* consumer) {
switch (consumer->opcode()) {
case HloOpcode::kCustomCall:
return true;
default:
return false;
}
}
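// A dot supports propagation when the space-to-batched operand is its LHS,
// the batch and space dimensions are neither contracted nor dot-batch
// dimensions, the old feature dimension is a dot-batch dimension, and the RHS
// has at least one free (non-batch, non-contracting) dimension.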
bool ConvolutionVisitor::SupportedDotForPropagation(HloInstruction* consumer,
HloInstruction* producer) {
if (consumer->opcode() != HloOpcode::kDot) {
return false;
}
auto operand = consumer->mutable_operand(0);
if (operand != producer || !instr_to_dim_map_.contains(operand)) {
return false;
}
const auto& dnums = consumer->dot_dimension_numbers();
const auto& contracting_dims = dnums.lhs_contracting_dimensions();
const auto& batch_dims = dnums.lhs_batch_dimensions();
auto result = instr_to_dim_map_[operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_feature_dim =
result[DimMapper(SpaceToBatchDimMap::kFeature)];
if (consumer->operand(1)->shape().rank() ==
batch_dims.size() + contracting_dims.size()) {
return false;
}
bool found = false;
for (auto dim : batch_dims) {
if (dim == old_batch_dim || dim == old_space_dim) {
return false;
}
if (dim == old_feature_dim) {
found = true;
}
}
if (!found) {
return false;
}
for (auto dim : contracting_dims) {
if (dim == old_batch_dim || dim == old_space_dim) {
return false;
}
}
return true;
}
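// Cheap structural check for whether `consumer` is an op kind this pass knows
// how to propagate through at all; CanPropagate then performs the detailed
// shape and layout checks.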
bool ConvolutionVisitor::SupportedOpForPropagation(HloInstruction* consumer,
HloInstruction* producer) {
if (IsOpcodeNonPropagatable(consumer)) {
return false;
}
if (IsTrivialElementwise(consumer)) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {
if (!IsBroadcastPropagatable(consumer->mutable_operand(i), producer)) {
VLOG(2) << "Could not propagate through broadcast";
return false;
}
}
}
return true;
}
if (consumer->opcode() == HloOpcode::kConvolution) {
return true;
}
if (consumer->opcode() == HloOpcode::kConcatenate) {
HloInstruction* pivot_operand = nullptr;
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (instr_to_dim_map_.contains(consumer->mutable_operand(i))) {
pivot_operand = consumer->mutable_operand(i);
break;
}
}
if (pivot_operand == nullptr) {
VLOG(1) << "Concat: Dim map not found on any operand";
return false;
}
auto result = instr_to_dim_map_[pivot_operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (consumer->concatenate_dimension() == old_batch_dim ||
consumer->concatenate_dimension() == old_space_dim) {
return false;
}
return true;
}
if (consumer->opcode() == HloOpcode::kReverse) {
auto operand_0 = consumer->mutable_operand(0);
if (!instr_to_dim_map_.contains(operand_0)) {
return false;
}
auto result = instr_to_dim_map_[operand_0];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
for (auto dim : consumer->dimensions()) {
if (dim == old_batch_dim || dim == old_space_dim) {
return false;
}
}
return true;
}
if (consumer->opcode() == HloOpcode::kTranspose) {
return true;
}
if (consumer->opcode() == HloOpcode::kPad) {
auto operand_0 = consumer->mutable_operand(0);
if (!instr_to_dim_map_.contains(operand_0)) {
return false;
}
auto result = instr_to_dim_map_[operand_0];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto does_dim_have_padding = [](PaddingConfig padding_config, int64_t dim) {
return padding_config.dimensions(dim).edge_padding_low() != 0 ||
padding_config.dimensions(dim).edge_padding_high() != 0 ||
padding_config.dimensions(dim).interior_padding() != 0;
};
if (does_dim_have_padding(consumer->padding_config(), old_batch_dim) ||
does_dim_have_padding(consumer->padding_config(), old_space_dim)) {
return false;
}
return true;
}
if (consumer->opcode() == HloOpcode::kSlice) {
auto operand = consumer->mutable_operand(0);
if (!instr_to_dim_map_.contains(operand)) {
return false;
}
auto result = instr_to_dim_map_[operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (consumer->shape().dimensions(old_batch_dim) !=
operand->shape().dimensions(old_batch_dim)) {
return false;
}
if (consumer->shape().dimensions(old_space_dim) !=
operand->shape().dimensions(old_space_dim)) {
return false;
}
return true;
}
if (SupportedDotForPropagation(consumer, producer)) {
return true;
}
if (consumer->opcode() == HloOpcode::kReduce) {
if (consumer->shape().IsTuple()) {
return false;
}
auto reduce_dims = consumer->dimensions();
auto result = instr_to_dim_map_[consumer->mutable_operand(0)];
const int64_t batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (!absl::c_linear_search(reduce_dims, batch_dim) &&
!absl::c_linear_search(reduce_dims, space_dim)) {
return true;
}
return absl::c_linear_search(reduce_dims, batch_dim) &&
absl::c_linear_search(reduce_dims, space_dim);
}
if (consumer->opcode() == HloOpcode::kReduceWindow &&
consumer->shape().IsTuple()) {
return false;
}
if (consumer->opcode() == HloOpcode::kReduceWindow ||
consumer->opcode() == HloOpcode::kSelectAndScatter) {
auto first_operand = consumer->mutable_operand(0);
auto window = consumer->window();
if (instr_to_dim_map_.count(first_operand) <= 0) {
VLOG(1) << "Dim map not found on windowed operand. Window dim count "
<< window.dimensions().size();
return false;
}
auto result = instr_to_dim_map_[first_operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (window.dimensions(old_batch_dim).size() != 1) {
return false;
}
if (window.dimensions(old_space_dim).padding_low() != 0) {
return false;
}
if (window.dimensions(old_space_dim).base_dilation() != 1 ||
window.dimensions(old_space_dim).window_dilation() != 1) {
return false;
}
if (window.dimensions(old_batch_dim).base_dilation() != 1 ||
window.dimensions(old_batch_dim).window_dilation() != 1) {
return false;
}
if (window.dimensions(old_space_dim).padding_high() >
window.dimensions(old_space_dim).size()) {
return false;
}
if (old_to_new_instrs_.count(first_operand) <= 0) {
return false;
}
auto new_operand = old_to_new_instrs_[first_operand];
auto permute_dims = instr_to_dim_permute_map_[new_operand];
if (consumer->opcode() == HloOpcode::kSelectAndScatter) {
const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim);
if (new_operand->shape().dimensions(new_space_dim) %
window.dimensions(old_space_dim).stride() !=
0) {
return false;
}
if (!ShapeUtil::ElementIsFloating(consumer->shape())) {
return false;
}
auto scatter_comp = consumer->scatter();
if (!Match(scatter_comp->root_instruction(),
m::AddAnyOrder(m::Parameter(0), m::Parameter(1)))) {
return false;
}
auto select_comp = consumer->select();
if (!Match(select_comp->root_instruction(),
m::Compare(m::Parameter(0), m::Parameter(1))
.WithComparisonDirection(ComparisonDirection::kGe)) &&
!Match(select_comp->root_instruction(),
m::Compare(m::Parameter(1), m::Parameter(0))
.WithComparisonDirection(ComparisonDirection::kGe))) {
return false;
}
if (consumer->window().dimensions(old_space_dim).padding_low() != 0) {
return false;
}
}
return true;
}
return false;
}
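// Performs the propagation decided by CanPropagate: clones `consumer` onto
// the space-to-batched operands and records the result in
// `old_to_new_instrs_`, `instr_to_dim_map_` and `instr_to_dim_permute_map_`.
// The return value indicates whether the new instruction may itself need
// further propagation into its users.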
absl::StatusOr<bool> ConvolutionVisitor::Propagate(HloInstruction* consumer,
HloInstruction* producer) {
auto computation = consumer->parent();
if (IsTrivialElementwise(consumer)) {
auto dim_map_val = instr_to_dim_map_[producer];
auto new_consumer = computation->AddInstruction(consumer->Clone());
bool is_pivot_producer_modified = false;
if (consumer->IsElementwiseBinary() ||
consumer->opcode() == HloOpcode::kSelect) {
int64_t pivot_operand_number = -1;
HloInstruction* pivot_operand = nullptr;
for (int i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {
continue;
}
auto operand = consumer->mutable_operand(i);
if (old_to_new_instrs_.contains(operand)) {
if (pivot_operand_number == -1 ||
old_to_new_instrs_[pivot_operand]->shape().dimensions() <
old_to_new_instrs_[operand]->shape().dimensions()) {
is_pivot_producer_modified = true;
pivot_operand_number = i;
pivot_operand = consumer->mutable_operand(pivot_operand_number);
}
}
}
if (pivot_operand_number != -1) {
producer = pivot_operand;
}
}
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
std::vector<HloInstruction*> instructions_to_transform;
if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {
auto broadcast = consumer->mutable_operand(i);
PropagateOnBroadcast(broadcast, producer);
HloInstruction* new_broadcast = nullptr;
auto new_producer = old_to_new_instrs_[producer];
for (auto previous_broadcast : broadcast_map_[broadcast]) {
if (ShapeUtil::CompatibleIgnoringElementType(
previous_broadcast->shape(), new_producer->shape())) {
new_broadcast = previous_broadcast;
break;
}
}
CHECK_NE(new_broadcast, nullptr);
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(i, new_broadcast));
} else if (old_to_new_instrs_.contains(consumer->mutable_operand(i))) {
HloInstruction* operand_to_use = nullptr;
auto result = instr_to_dim_map_[producer];
const int64_t old_batch_dim =
result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_batch_size =
producer->shape().dimensions(old_batch_dim);
HloInstruction* new_instr =
old_to_new_instrs_[consumer->mutable_operand(i)];
HloInstruction* pivot_new_instr = old_to_new_instrs_[producer];
auto permute_dims = instr_to_dim_permute_map_[new_instr];
const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t space_dim = DimLookUp(permute_dims, old_space_dim);
const int64_t batch_size = new_instr->shape().dimensions(batch_dim);
if (new_instr->shape().dimensions(space_dim) !=
pivot_new_instr->shape().dimensions(space_dim)) {
CHECK_EQ(batch_dim + 1, space_dim);
std::vector<int64_t> new_dimensions(
new_instr->shape().dimensions().begin(),
new_instr->shape().dimensions().end());
new_dimensions[space_dim] *= (batch_size / old_batch_size);
new_dimensions[batch_dim] = old_batch_size;
TF_ASSIGN_OR_RETURN(HloInstruction * reshape,
MakeReshapeHlo(new_dimensions, new_instr));
const int64_t pivot_space_size =
pivot_new_instr->shape().dimensions(space_dim) * batch_size /
old_batch_size;
CHECK(pivot_space_size > new_dimensions[space_dim] ||
!is_pivot_producer_modified);
PaddingConfig padding_config =
MakeNoPaddingConfig(reshape->shape().dimensions_size());
padding_config.mutable_dimensions(space_dim)->set_edge_padding_high(
pivot_space_size - new_dimensions[space_dim]);
padding_config.mutable_dimensions(space_dim)->set_edge_padding_low(0);
HloInstruction* padding =
consumer->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(reshape->shape().element_type())));
TF_ASSIGN_OR_RETURN(
HloInstruction * padded_operand,
MakePadHlo(reshape, padding, padding_config, &reshape->metadata(),
&reshape->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
operand_to_use,
MakeReshapeHlo(pivot_new_instr->shape().dimensions(),
padded_operand));
} else {
operand_to_use = old_to_new_instrs_[consumer->mutable_operand(i)];
}
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(i, operand_to_use));
} else if (consumer->IsElementwiseBinary() &&
consumer->mutable_operand(i)->opcode() ==
HloOpcode::kBroadcast &&
IsBroadcastTree(consumer->mutable_operand(i), producer,
instructions_to_transform)) {
RewriteBroadcastTree(producer, instructions_to_transform);
TF_CHECK_OK(new_consumer->ReplaceOperandWithDifferentShape(
i, old_to_new_instrs_[consumer->mutable_operand(i)]));
} else if (consumer->operand(i)->opcode() == HloOpcode::kConstant) {
TF_ASSIGN_OR_RETURN(
auto new_constant,
PropagateOnConstant(consumer->mutable_operand(i), producer));
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(i, new_constant));
}
}
auto old_type = new_consumer->mutable_shape()->element_type();
*(new_consumer->mutable_shape()) = old_to_new_instrs_[producer]->shape();
new_consumer->mutable_shape()->set_element_type(old_type);
old_to_new_instrs_[consumer] = new_consumer;
instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val);
CHECK(instr_to_dim_permute_map_.contains(old_to_new_instrs_[producer]));
instr_to_dim_permute_map_[new_consumer] = std::vector<int64_t>(
instr_to_dim_permute_map_[old_to_new_instrs_[producer]]);
VLOG(2) << " new_consumer " << new_consumer->ToString()
<< " old_to_new_instrs_[producer] "
<< old_to_new_instrs_[producer]->ToString() << " permute dims "
<< instr_to_dim_permute_map_.count(new_consumer);
return true;
}
if (consumer->opcode() == HloOpcode::kConvolution) {
if (IsConvSuitableForSpaceToBatch(consumer)) {
TF_CHECK_OK(PropagateOnConv(consumer));
return true;
} else {
TF_CHECK_OK(PropagateOnBackpropFilterConv(consumer));
return false;
}
}
if (consumer->opcode() == HloOpcode::kConcatenate) {
TF_CHECK_OK(PropagateOnConcat(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kReverse) {
TF_CHECK_OK(PropagateOnReverse(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kDot) {
auto dim_map_val = instr_to_dim_map_[producer];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
int64_t new_batch_dim = -1;
int64_t new_space_dim = -1;
int64_t outer = 0;
for (int64_t i = 0; i < producer->shape().rank(); ++i) {
if (absl::c_linear_search(
consumer->dot_dimension_numbers().lhs_batch_dimensions(), i) ||
absl::c_linear_search(
consumer->dot_dimension_numbers().lhs_contracting_dimensions(),
i)) {
continue;
}
if (i == old_batch_dim) {
new_batch_dim =
outer +
consumer->dot_dimension_numbers().lhs_batch_dimensions_size();
}
if (i == old_space_dim) {
        new_space_dim =
outer +
consumer->dot_dimension_numbers().lhs_batch_dimensions_size();
}
++outer;
}
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = new_space_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
consumer->shape().rank() - 1;
instr_to_dim_map_[consumer] = dim_map;
auto new_consumer = computation->AddInstruction(consumer->Clone());
new_consumer->mutable_shape()->mutable_dimensions()[new_batch_dim] =
producer->shape().dimensions(old_batch_dim);
new_consumer->mutable_shape()->mutable_dimensions()[new_space_dim] =
producer->shape().dimensions(old_space_dim);
old_to_new_instrs_[consumer] = new_consumer;
return true;
}
if (consumer->opcode() == HloOpcode::kPad) {
TF_CHECK_OK(PropagateOnPad(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kSlice) {
TF_CHECK_OK(PropagateOnSlice(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kReduce) {
auto reduce_dims = consumer->dimensions();
auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t new_space_dim = DimLookUp(permute_dims, space_dim);
std::vector<int64_t> changed_dims(consumer->dimensions().size());
if (!absl::c_linear_search(reduce_dims, old_batch_dim) &&
!absl::c_linear_search(reduce_dims, space_dim)) {
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
changed_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i));
}
int64_t new_output_batch_dim = new_batch_dim;
int64_t new_output_space_dim = new_space_dim;
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
if (changed_dims[i] < new_batch_dim) {
new_output_batch_dim--;
}
if (changed_dims[i] < new_space_dim) {
new_output_space_dim--;
}
}
int64_t old_output_batch_dim = old_batch_dim;
int64_t old_output_space_dim = space_dim;
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
if (reduce_dims[i] < old_batch_dim) {
old_output_batch_dim--;
}
if (reduce_dims[i] < space_dim) {
old_output_space_dim--;
}
}
HloInstruction* new_consumer = nullptr;
TF_ASSIGN_OR_RETURN(
new_consumer,
MakeReduceHlo(first_operand, consumer->mutable_operand(1),
changed_dims, consumer->called_computations()[0]));
VLOG(3) << " new_output_batch_dim " << new_output_batch_dim << " size "
<< first_operand->shape().dimensions(new_batch_dim)
<< " new_output_space_dim " << new_output_space_dim << " size "
<< first_operand->shape().dimensions(new_space_dim);
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = old_output_batch_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = old_output_space_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = -1;
instr_to_dim_map_[consumer] = dim_map;
const int64_t rank = first_operand->shape().rank();
const int64_t output_rank = new_consumer->shape().rank();
std::vector<int64_t> old_reduce_output_to_input(output_rank);
int dim_number_to_assign_old = 0;
for (int64_t i = 0; i < rank; ++i) {
if (auto it = absl::c_find(reduce_dims, i); it != reduce_dims.end()) {
continue;
}
old_reduce_output_to_input[dim_number_to_assign_old++] = i;
}
std::vector<int64_t> new_reduce_output_to_input(output_rank);
int dim_number_to_assign_new = 0;
for (int64_t i = 0; i < rank; ++i) {
if (auto it = absl::c_find(changed_dims, i); it != changed_dims.end()) {
continue;
}
new_reduce_output_to_input[dim_number_to_assign_new++] = i;
}
std::vector<int64_t> new_permute_dims(output_rank);
for (int64_t i = 0; i < output_rank; ++i) {
new_permute_dims[i] = std::distance(
new_reduce_output_to_input.begin(),
absl::c_find(
new_reduce_output_to_input,
DimLookUp(permute_dims, old_reduce_output_to_input[i])));
}
instr_to_dim_permute_map_[new_consumer] = new_permute_dims;
old_to_new_instrs_[consumer] = new_consumer;
return true;
}
HloInstruction* new_consumer =
computation->AddInstruction(consumer->Clone());
auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));
std::vector<int64_t> old_spatial_dims = retval.first;
std::vector<int64_t> new_spatial_dims = retval.second;
TF_ASSIGN_OR_RETURN(
first_operand,
SelectValidPortion(first_operand, consumer->mutable_operand(0),
consumer->mutable_operand(1), new_batch_dim,
new_spatial_dims, old_batch_dim, old_spatial_dims));
for (int64_t i = 0; i < new_consumer->dimensions().size(); ++i) {
changed_dims[i] = DimLookUp(permute_dims, new_consumer->dimensions(i));
}
*(new_consumer->mutable_dimensions()) = changed_dims;
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));
old_to_new_instrs_[consumer] = new_consumer;
instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val);
return false;
}
if (consumer->opcode() == HloOpcode::kTranspose) {
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto new_consumer = computation->AddInstruction(first_operand->Clone());
old_to_new_instrs_[consumer] = new_consumer;
auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_feature_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kFeature)];
int64_t new_batch_dim, new_space_dim, new_feature_dim;
std::vector<int64_t> new_dimensions(consumer->dimensions().size());
for (int64_t ctr = 0; ctr < consumer->dimensions().size(); ++ctr) {
int64_t dim = consumer->dimensions(ctr);
if (dim == old_batch_dim) {
new_batch_dim = ctr;
}
if (dim == old_space_dim) {
new_space_dim = ctr;
}
if (dim == old_feature_dim) {
new_feature_dim = ctr;
}
}
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = new_feature_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = new_space_dim;
instr_to_dim_map_[consumer] = dim_map;
std::vector<int64_t> new_permute_dims(consumer->dimensions().size());
auto permute_dims = instr_to_dim_permute_map_[first_operand];
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
new_permute_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i));
}
instr_to_dim_permute_map_[new_consumer] = new_permute_dims;
return true;
}
if (consumer->opcode() == HloOpcode::kReduceWindow ||
consumer->opcode() == HloOpcode::kSelectAndScatter) {
bool is_select_and_scatter =
consumer->opcode() == HloOpcode::kSelectAndScatter;
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto init_val = is_select_and_scatter ? consumer->mutable_operand(2)
: consumer->mutable_operand(1);
auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];
auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));
std::vector<int64_t> old_spatial_dims = retval.first;
std::vector<int64_t> new_spatial_dims = retval.second;
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = old_spatial_dims[0];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t new_space_dim = new_spatial_dims[0];
auto new_shape = first_operand->shape();
auto old_shape = consumer->mutable_operand(0)->shape();
const int64_t new_space_size = new_shape.dimensions(new_space_dim);
const int64_t stride =
consumer->window().dimensions(old_space_dim).stride();
auto pad_val =
is_select_and_scatter
? consumer->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MinValue(
consumer->operand(2)->shape().element_type())))
: init_val;
TF_ASSIGN_OR_RETURN(
first_operand,
SelectValidPortion(first_operand, consumer->mutable_operand(0), pad_val,
new_batch_dim, new_spatial_dims, old_batch_dim,
old_spatial_dims));
const int64_t extra_space = new_space_size % stride;
if (extra_space) {
CHECK_EQ(consumer->opcode(), HloOpcode::kReduceWindow);
const int64_t old_batch_size = old_shape.dimensions(old_batch_dim);
const int64_t old_space_size = old_shape.dimensions(old_space_dim);
if ((new_space_size - extra_space) * old_batch_size *
ctrl_.number_of_splits >=
old_batch_size * old_space_size) {
TF_ASSIGN_OR_RETURN(
first_operand, ChangeSpatialSizeOnSpaceToBatchedShape(
first_operand, new_batch_dim, old_batch_size,
new_spatial_dims, new_space_size - extra_space));
} else {
TF_ASSIGN_OR_RETURN(
first_operand,
ChangeSpatialSizeOnSpaceToBatchedShape(
first_operand, new_batch_dim, old_batch_size, new_spatial_dims,
new_space_size + stride - extra_space,
true));
}
}
const int64_t window_size =
consumer->window().dimensions(old_space_dim).size();
const int64_t last_overlap_point = ((new_space_size - 1) / stride) * stride;
VLOG(1) << "last_overlap_point " << last_overlap_point << " window_size "
<< window_size << " new_space_size " << new_space_size;
const int64_t halo_size = last_overlap_point + window_size - new_space_size;
if (halo_size > 0) {
TF_ASSIGN_OR_RETURN(
first_operand,
HaloDuplicateWithSlice(first_operand, new_spatial_dims, new_batch_dim,
0, halo_size, init_val));
}
Window new_win;
for (int64_t i = 0; i < consumer->window().dimensions().size(); ++i) {
auto dim = ReverseDimLookUp(permute_dims, i);
new_win.add_dimensions();
new_win.mutable_dimensions(i)->set_stride(
consumer->window().dimensions(dim).stride());
new_win.mutable_dimensions(i)->set_size(
consumer->window().dimensions(dim).size());
if (i == old_space_dim) {
new_win.mutable_dimensions(i)->set_padding_high(0);
new_win.mutable_dimensions(i)->set_padding_low(0);
} else {
new_win.mutable_dimensions(i)->set_padding_high(
consumer->window().dimensions(dim).padding_high());
new_win.mutable_dimensions(i)->set_padding_low(
consumer->window().dimensions(dim).padding_low());
}
new_win.mutable_dimensions(i)->set_window_dilation(
consumer->window().dimensions(dim).window_dilation());
new_win.mutable_dimensions(i)->set_base_dilation(
consumer->window().dimensions(dim).base_dilation());
new_win.mutable_dimensions(i)->set_window_reversal(
consumer->window().dimensions(dim).window_reversal());
}
new_shape = first_operand->shape();
HloInstruction* new_consumer = nullptr;
if (is_select_and_scatter) {
auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)];
auto select_comp = consumer->select();
auto scatter_comp = consumer->scatter();
TF_ASSIGN_OR_RETURN(
auto new_select_and_scatter_shape,
ShapeInference::InferSelectAndScatterShape(
new_shape, select_comp->ComputeProgramShape(), new_win,
second_operand->shape(), init_val->shape(),
scatter_comp->ComputeProgramShape()));
new_consumer = computation_->AddInstruction(
HloInstruction::CreateSelectAndScatter(
new_select_and_scatter_shape, first_operand, select_comp, new_win,
second_operand, init_val, scatter_comp),
&consumer->metadata(), &consumer->frontend_attributes());
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(1, second_operand));
VLOG(2) << "New select and scatter " << new_consumer->ToString();
if (halo_size > 0) {
const int64_t rank = new_consumer->shape().rank();
const int64_t batch_size =
new_consumer->shape().dimensions(new_batch_dim);
std::vector<int64_t> start_indices(rank, 0),
end_indices(new_consumer->shape().dimensions().begin(),
new_consumer->shape().dimensions().end()),
strides(rank, 1);
start_indices[new_space_dim] = new_space_size;
end_indices[new_space_dim] = new_space_size + halo_size;
end_indices[new_batch_dim] = batch_size - 1;
TF_ASSIGN_OR_RETURN(
HloInstruction * bottom,
MakeSliceHlo(new_consumer, start_indices, end_indices, strides,
&consumer->metadata(),
&consumer->frontend_attributes()));
std::vector<int64_t> start_indices_top(rank, 0),
end_indices_top(new_consumer->shape().dimensions().begin(),
new_consumer->shape().dimensions().end());
end_indices_top[new_space_dim] = halo_size;
start_indices_top[new_batch_dim] = 1;
TF_ASSIGN_OR_RETURN(
HloInstruction * top,
MakeSliceHlo(new_consumer, start_indices_top, end_indices_top,
strides, &consumer->metadata(),
&consumer->frontend_attributes()));
HloInstruction* default_fill = MakeBroadcastHlo(
init_val, {}, top->shape().dimensions(), &init_val->metadata(),
&init_val->frontend_attributes());
TF_ASSIGN_OR_RETURN(
HloInstruction * bottom_compare,
MakeCompareHlo(ComparisonDirection::kNe, bottom, default_fill,
&bottom->metadata(),
&bottom->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * bottom_taken,
MakeSelectHlo(bottom_compare, bottom, default_fill, nullptr,
&bottom_compare->metadata(),
&bottom_compare->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * top_compare,
MakeCompareHlo(ComparisonDirection::kNe, top, default_fill,
&top->metadata(), &top->frontend_attributes()));
TF_ASSIGN_OR_RETURN(HloInstruction * top_taken,
MakeSelectHlo(top_compare, top, bottom_taken,
nullptr, &top_compare->metadata(),
&top_compare->frontend_attributes()));
TF_ASSIGN_OR_RETURN(HloInstruction * both_compare,
MakeBinaryHlo(HloOpcode::kAnd, top_compare,
bottom_compare, &consumer->metadata(),
&consumer->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * both_added,
MakeBinaryHlo(HloOpcode::kAdd, top, bottom, &consumer->metadata(),
&consumer->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * final_selection,
MakeSelectHlo(both_compare, both_added, top_taken, nullptr,
&both_compare->metadata(),
&both_compare->frontend_attributes()));
PaddingConfig padding_config =
MakeNoPaddingConfig(final_selection->shape().dimensions_size());
padding_config.mutable_dimensions(new_batch_dim)
->set_edge_padding_low(1);
padding_config.mutable_dimensions(new_space_dim)
->set_edge_padding_high(new_space_size);
HloInstruction* padding = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(final_selection->shape().element_type())),
&consumer->metadata(), &consumer->frontend_attributes());
TF_ASSIGN_OR_RETURN(
final_selection,
MakePadHlo(final_selection, padding, padding_config,
&final_selection->metadata(),
&final_selection->frontend_attributes()));
tsl::core::Bitmap b(batch_size * (new_space_size + halo_size));
for (int k = 0; k < batch_size * (new_space_size + halo_size); ++k) {
const int64_t space_index = k % (new_space_size + halo_size);
const int64_t batch_index = (k / (new_space_size + halo_size));
if (batch_index < 1 || space_index >= halo_size) {
b.set(k);
} else {
b.clear(k);
}
}
auto arg_literal = LiteralUtil::CreateR1(b);
VLOG(4) << "Slice mask created: arg literal " << arg_literal.ToString();
HloInstruction* slice_mask = computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(arg_literal)),
&consumer->metadata(), &consumer->frontend_attributes());
std::vector<int64_t> slice_mask_reshape_dims(2);
slice_mask_reshape_dims[0] = batch_size;
slice_mask_reshape_dims[1] = (new_space_size + halo_size);
TF_ASSIGN_OR_RETURN(
HloInstruction * slice_mask_reshaped,
MakeReshapeHlo(slice_mask_reshape_dims, slice_mask));
HloInstruction* shape_mask = MakeBroadcastHlo(
slice_mask_reshaped, {new_batch_dim, new_space_dim},
final_selection->shape().dimensions(), &slice_mask->metadata(),
&slice_mask->frontend_attributes());
TF_ASSIGN_OR_RETURN(
new_consumer,
MakeSelectHlo(shape_mask, new_consumer, final_selection, nullptr,
&shape_mask->metadata(),
&shape_mask->frontend_attributes()));
}
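    // Slice the corrected result back to the exact shape of the transformed
    // operand.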
auto previous_shape =
old_to_new_instrs_[consumer->mutable_operand(0)]->shape();
std::vector<int64_t> start_indices(previous_shape.rank(), 0),
end_indices(previous_shape.dimensions().begin(),
previous_shape.dimensions().end()),
strides(previous_shape.rank(), 1);
TF_ASSIGN_OR_RETURN(new_consumer,
MakeSliceHlo(new_consumer, start_indices, end_indices,
strides, &consumer->metadata(),
&consumer->frontend_attributes()));
} else {
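    // Plain reduce-window: infer the result shape for the transformed operand
    // and rebuild the op with the adjusted window.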
auto reduce_comp = consumer->to_apply();
TF_ASSIGN_OR_RETURN(auto new_reduce_window_shape,
ShapeInference::InferReduceWindowShape(
new_shape, init_val->shape(), new_win));
new_consumer = computation_->AddInstruction(
HloInstruction::CreateReduceWindow(new_reduce_window_shape,
first_operand, init_val, new_win,
reduce_comp),
&consumer->metadata(), &consumer->frontend_attributes());
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));
VLOG(1) << "New reduce window " << new_consumer->ToString();
}
old_to_new_instrs_[consumer] = new_consumer;
instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val);
instr_to_dim_permute_map_[new_consumer] = std::vector<int64_t>(
instr_to_dim_permute_map_[old_to_new_instrs_[consumer->mutable_operand(
0)]]);
return true;
}
LOG(FATAL) << "Trying to propagate through an unsupported instruction "
<< consumer->ToString();
return true;
}
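// Masks off the positions of a space-to-batched tensor that fall outside the
// bounds of the original tensor: a mixed-radix walk over (old batch, split
// factors, new space) marks a position invalid when its reconstructed spatial
// index exceeds the old spatial size, and those positions are overwritten
// with select_val via a broadcast mask and select.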
absl::StatusOr<HloInstruction*> ConvolutionVisitor::SelectValidPortion(
HloInstruction* new_instr, HloInstruction* old_instr,
HloInstruction* select_val, int64_t new_batch_dim,
absl::Span<const int64_t> new_space_dims, int64_t old_batch_dim,
absl::Span<const int64_t> old_space_dims) {
auto new_shape = new_instr->shape();
auto old_shape = old_instr->shape();
VLOG(1) << "In SelectValidPortion new_batch_dim " << new_batch_dim
<< " new_space_dim " << new_space_dims[0] << " old_batch_dim "
<< old_batch_dim << " old_space_dim " << old_space_dims[0];
const int64_t new_batch_size = new_shape.dimensions(new_batch_dim);
const int64_t new_space_size = new_shape.dimensions(new_space_dims[0]);
const int64_t old_batch_size = old_shape.dimensions(old_batch_dim);
const int64_t old_space_size = old_shape.dimensions(old_space_dims[0]);
CHECK_EQ(new_batch_size % old_batch_size, 0)
<< " New batch size " << new_batch_size << " old batch size "
<< old_batch_size;
const int64_t num_splits = ctrl_.number_of_splits;
const int64_t spatial_dim_count = new_space_dims.size();
std::vector<int64_t> bounds(2 + spatial_dim_count, new_space_size);
bounds[0] = old_batch_size;
bounds[1] = IPow<int64_t>(num_splits, spatial_dim_count);
const int64_t total_new_space =
IPow<int64_t>(new_space_size, spatial_dim_count);
tsl::core::Bitmap b(new_batch_size * total_new_space);
for (int k = 0; k < new_batch_size * total_new_space; ++k) {
auto radix = ToMixedRadix(k, bounds);
bool out_of_bounds = false;
int64_t batch_residue = 1;
for (int i = 0; i < spatial_dim_count; ++i) {
const int64_t space_index = radix[2 + i];
const int64_t batch_index = (radix[1] / batch_residue) % num_splits;
batch_residue *= num_splits;
if (batch_index * new_space_size + space_index >= old_space_size) {
out_of_bounds = true;
}
}
if (!out_of_bounds) {
b.set(k);
} else {
b.clear(k);
}
}
auto arg_literal = LiteralUtil::CreateR1(b);
VLOG(4) << "Slice mask created: arg literal " << arg_literal.ToString();
HloInstruction* slice_mask = computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(arg_literal)),
&old_instr->metadata(), &old_instr->frontend_attributes());
std::vector<int64_t> slice_mask_reshape_dims(1 + spatial_dim_count,
new_space_size);
slice_mask_reshape_dims[0] = new_batch_size;
TF_ASSIGN_OR_RETURN(HloInstruction * slice_mask_reshaped,
MakeReshapeHlo(slice_mask_reshape_dims, slice_mask));
std::vector<int64_t> broadcast_dims(new_space_dims.begin(),
new_space_dims.end());
broadcast_dims.insert(broadcast_dims.begin(), new_batch_dim);
HloInstruction* shape_mask = MakeBroadcastHlo(
slice_mask_reshaped, broadcast_dims, new_instr->shape().dimensions(),
&slice_mask_reshaped->metadata(),
&slice_mask_reshaped->frontend_attributes());
VLOG(1) << "Shape mask made " << shape_mask->ToString();
HloInstruction* zeroes = MakeBroadcastHlo(
select_val, {}, new_instr->shape().dimensions(), &select_val->metadata(),
&select_val->frontend_attributes());
TF_ASSIGN_OR_RETURN(new_instr,
MakeSelectHlo(shape_mask, new_instr, zeroes, nullptr,
&shape_mask->metadata(),
&shape_mask->frontend_attributes()));
return new_instr;
}
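// Undoes the space-to-batch transform on an instruction: splits the merged
// batch back into (old batch, split factors), folds the split factors into
// the spatial dimensions, slices off the padding, and transposes back to the
// original layout. Results are memoized in batch_to_space_map_.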
absl::StatusOr<HloInstruction*> ConvolutionVisitor::BatchToSpace(
HloInstruction* old_instr) {
if (batch_to_space_map_.count(old_instr)) {
CHECK_NE(batch_to_space_map_[old_instr], nullptr);
return batch_to_space_map_[old_instr];
}
auto result = instr_to_dim_map_[old_instr];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_batch_size = old_instr->shape().dimensions(old_batch_dim);
CHECK(old_to_new_instrs_.contains(old_instr));
auto new_instr = old_to_new_instrs_[old_instr];
VLOG(2) << "old_batch_dim " << old_batch_dim << " old_space_dim "
<< old_space_dim << " old_instr " << old_instr->ToString()
<< "\n new_instr " << new_instr->ToString() << " permute dims "
<< instr_to_dim_permute_map_.count(new_instr) << " old_batch_size "
<< old_batch_size;
CHECK(instr_to_dim_permute_map_.contains(new_instr));
auto permute_dims = instr_to_dim_permute_map_[new_instr];
const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t space_dim = DimLookUp(permute_dims, old_space_dim);
const int64_t spatial_dim_size = new_instr->shape().dimensions(space_dim);
std::vector<int64_t> split_spatial_dimensions(
ctrl_.count_of_dimensions_to_convert);
absl::c_iota(split_spatial_dimensions, space_dim);
TF_ASSIGN_OR_RETURN(new_instr, SplitAndTransposeMergedBatch(
new_instr, batch_dim, old_batch_size,
split_spatial_dimensions));
std::vector<int64_t> new_dimensions(new_instr->shape().dimensions().begin(),
new_instr->shape().dimensions().end());
new_dimensions.erase(new_dimensions.begin() + split_spatial_dimensions[0],
new_dimensions.begin() + split_spatial_dimensions[0] +
ctrl_.count_of_dimensions_to_convert);
for (auto spatial_dimension : split_spatial_dimensions) {
new_dimensions[spatial_dimension] =
spatial_dim_size * ctrl_.number_of_splits;
}
TF_ASSIGN_OR_RETURN(HloInstruction * reshape,
MakeReshapeHlo(new_dimensions, new_instr));
VLOG(1) << "Batch to space reshape " << reshape->ToString();
const int64_t rank = old_instr->shape().rank();
std::vector<int64_t> start_indices(rank, 0),
end_indices(new_dimensions.begin(), new_dimensions.end()),
strides(rank, 1);
for (auto spatial_dimension : split_spatial_dimensions) {
end_indices[spatial_dimension] =
old_instr->shape().dimensions(old_space_dim);
}
TF_ASSIGN_OR_RETURN(
HloInstruction * output_slice,
MakeSliceHlo(reshape, start_indices, end_indices, strides,
&reshape->metadata(), &reshape->frontend_attributes()));
VLOG(1) << "Batch to space slice " << output_slice->ToString();
std::vector<int64_t> transpose_dims(permute_dims);
TF_ASSIGN_OR_RETURN(HloInstruction * output_transpose,
MakeTransposeHlo(output_slice, transpose_dims));
old_instr->SetupDerivedInstruction(output_transpose);
batch_to_space_map_[old_instr] = output_transpose;
return output_transpose;
}
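// Walks the users of a space-to-batched convolution with a worklist and
// propagates the transform through every supported consumer. Roots and
// unsupported users are fed an explicit batch-to-space conversion instead,
// so the rest of the graph keeps seeing the original shapes.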
absl::Status ConvolutionVisitor::PropagateOnUsers(HloInstruction* old_conv) {
std::queue<std::pair<HloInstruction*, HloInstruction*>> propagation_worklist;
if (old_conv->user_count() == 0) {
TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,
BatchToSpace(old_conv));
VLOG(1) << "Replacing the root instruction to "
<< batch_to_space->ToString();
TF_CHECK_OK(computation_->ReplaceInstruction(old_conv, batch_to_space));
VLOG(1) << "Replacement successful";
return absl::OkStatus();
}
int64_t iteration_count = 0;
propagation_worklist.push(
std::make_pair(old_conv, old_conv->mutable_operand(0)));
while (!propagation_worklist.empty()) {
auto top = propagation_worklist.front();
auto node = top.first;
auto parent = top.second;
VLOG(1) << "Traversing for propagation operating on " << node->ToString();
propagation_worklist.pop();
if (old_to_new_instrs_.count(node) > 0 && iteration_count != 0) {
continue;
}
bool needs_further_propagation = true;
if (iteration_count != 0) {
TF_ASSIGN_OR_RETURN(needs_further_propagation, Propagate(node, parent));
}
iteration_count++;
if (node->parent()->root_instruction() == node) {
if (!needs_further_propagation) {
VLOG(1) << "Replacing the root instruction to "
<< old_to_new_instrs_[node]->ToString();
TF_CHECK_OK(
computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));
continue;
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space, BatchToSpace(node));
VLOG(1) << "Replacing the root instruction to "
<< batch_to_space->ToString();
TF_CHECK_OK(computation_->ReplaceInstruction(node, batch_to_space));
} else {
if (!needs_further_propagation) {
TF_CHECK_OK(
computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));
continue;
}
HloInstructionSet unsupported_users;
for (auto user : node->users()) {
if (!SupportedOpForPropagation(user, node)) {
VLOG(1) << "Unsupported op found " << user->ToString();
unsupported_users.insert(user);
continue;
}
if (CanPropagate(user, node)) {
non_propagatable_instrs_.erase(user);
propagation_worklist.push(std::make_pair(user, node));
} else {
non_propagatable_instrs_.insert(user);
}
}
if (!unsupported_users.empty()) {
TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,
BatchToSpace(node));
for (auto user : unsupported_users) {
for (int64_t i = 0; i < user->operand_count(); ++i) {
if (user->operand(i) == node) {
TF_CHECK_OK(user->ReplaceOperandWith(i, batch_to_space));
}
}
}
}
}
}
return absl::OkStatus();
}
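// Propagates space-to-batch through a convolution whose activations were
// already transformed: remaps the dimension numbers through the recorded
// permutation, moves the split space dimension next to batch, zeroes out
// invalid halo data, resizes the split so each batch slice yields a whole
// number of output offsets, duplicates halos, and emits the new convolution.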
absl::Status ConvolutionVisitor::PropagateOnConv(HloInstruction* convolution) {
auto activations_old = convolution->mutable_operand(0);
CHECK(old_to_new_instrs_.contains(activations_old));
auto activations_new = old_to_new_instrs_[activations_old];
auto permute_dims = instr_to_dim_permute_map_[activations_new];
auto original_conv_dims = convolution->convolution_dimension_numbers();
auto old_new_dims = GetSpatialDimsToSplit(activations_old);
std::vector<int64_t> old_spatial_dims = old_new_dims.first;
std::vector<int64_t> new_spatial_dims = old_new_dims.second;
auto permuted_conv_dims_numbers = original_conv_dims;
int64_t activations_batch_dim =
DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());
int64_t activations_feature_dim =
DimLookUp(permute_dims, original_conv_dims.input_feature_dimension());
permuted_conv_dims_numbers.set_input_batch_dimension(activations_batch_dim);
permuted_conv_dims_numbers.set_input_feature_dimension(
activations_feature_dim);
for (int64_t i = 0; i < original_conv_dims.input_spatial_dimensions_size();
++i) {
permuted_conv_dims_numbers.set_input_spatial_dimensions(
i, DimLookUp(permute_dims,
original_conv_dims.input_spatial_dimensions(i)));
}
const int64_t old_batch_dim = original_conv_dims.input_batch_dimension();
const int64_t old_batch_size =
activations_old->shape().dimensions(old_batch_dim);
ConvDetails c =
GetConvolutionDetails(convolution, permuted_conv_dims_numbers);
VLOG(1) << "Propagating on conv activations_batch_dim "
<< activations_batch_dim << " spatial_dimension_to_split "
<< c.spatial_dimensions_to_split[0] << " old_batch_size "
<< old_batch_size;
TF_ASSIGN_OR_RETURN(
auto retval,
BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,
activations_batch_dim, &new_spatial_dims));
activations_new = retval.instr;
std::vector<int64_t> trans_dims = retval.transpose_dims;
CHECK(!trans_dims.empty());
auto select_val = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(activations_new->shape().element_type())),
&convolution->metadata(), &convolution->frontend_attributes());
TF_ASSIGN_OR_RETURN(
activations_new,
SelectValidPortion(activations_new, activations_old, select_val,
activations_batch_dim, new_spatial_dims, old_batch_dim,
old_spatial_dims));
auto new_dim_numbers = permuted_conv_dims_numbers;
const int64_t num_splits = ctrl_.number_of_splits;
const int64_t output_offsets = convolution->shape().dimensions(
permuted_conv_dims_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution)));
const int64_t output_offsets_per_split =
CeilOfRatio(output_offsets, num_splits);
int64_t spatial_split_size =
CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;
VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size
<< " spatial_split_size " << spatial_split_size;
while (spatial_split_size * num_splits + c.halo_size - c.spatial_size < 0 ||
spatial_split_size < c.halo_size - c.inherent_low_padding) {
spatial_split_size += c.stride;
}
VLOG(1) << "Modified spatial_split_size " << spatial_split_size;
const int64_t new_space_size =
activations_new->shape().dimensions(new_spatial_dims[0]);
int64_t slice_size = spatial_split_size + c.halo_size;
if (spatial_split_size > new_space_size) {
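    // Grow the split spatial dimension; judging by the contrast with the
    // shrink path below, the trailing 'true' asks
    // ChangeSpatialSizeOnSpaceToBatchedShape for an increase.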
TF_ASSIGN_OR_RETURN(
activations_new,
ChangeSpatialSizeOnSpaceToBatchedShape(
activations_new, activations_batch_dim, old_batch_size,
new_spatial_dims, spatial_split_size,
true));
} else {
if (spatial_split_size < new_space_size) {
VLOG(3)
<< "Decreasing the spatial size while propagating spatial_split_size "
<< spatial_split_size << " new_space_size " << new_space_size;
if (new_space_size % c.stride != 0 || c.base_dilation_factor != 1) {
TF_ASSIGN_OR_RETURN(
activations_new,
ChangeSpatialSizeOnSpaceToBatchedShape(
activations_new, activations_batch_dim, old_batch_size,
new_spatial_dims, spatial_split_size));
} else {
const int64_t additional_space_present = spatial_split_size % c.stride;
spatial_split_size = new_space_size;
slice_size =
spatial_split_size + std::max(c.kernel_spatial_dim_size - c.stride -
additional_space_present,
static_cast<int64_t>(0));
}
}
}
TF_ASSIGN_OR_RETURN(
activations_new,
HaloDuplicateWithSlice(
activations_new, new_spatial_dims, activations_batch_dim,
c.base_dilation_factor != 1 &&
c.inherent_low_padding != 0
? (c.inherent_low_padding == c.base_dilation_factor ? 1 : 0)
: c.inherent_low_padding,
slice_size - spatial_split_size));
const int64_t rank = (convolution->shape().rank());
std::vector<int64_t> transpose_dims(rank);
int dim_count = 0;
std::map<int64_t, int64_t> dim_translator;
for (int j = 0;
j < permuted_conv_dims_numbers.output_spatial_dimensions_size(); ++j) {
if (j == GetFirstChosenSpatialDim(convolution)) {
dim_translator[permuted_conv_dims_numbers.output_batch_dimension()] =
dim_count;
new_dim_numbers.set_output_batch_dimension(dim_count++);
}
dim_translator[permuted_conv_dims_numbers.output_spatial_dimensions(j)] =
dim_count;
new_dim_numbers.set_output_spatial_dimensions(j, dim_count);
dim_count++;
}
dim_translator[permuted_conv_dims_numbers.output_feature_dimension()] =
dim_count;
new_dim_numbers.set_output_feature_dimension(dim_count);
int p = 0;
for (const auto& entry : dim_translator) {
transpose_dims[p] = entry.second;
p++;
}
auto new_window = convolution->window();
const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
new_window.mutable_dimensions(first_dim + i)
->set_padding_high(c.high_padding_for_conv);
new_window.mutable_dimensions(first_dim + i)
->set_padding_low(c.low_padding_for_conv);
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_conv,
MakeConvolveHlo(
activations_new, convolution->mutable_operand(1),
convolution->feature_group_count(), convolution->batch_group_count(),
new_window, new_dim_numbers, convolution->precision_config(),
convolution->shape().element_type()));
convolution->SetupDerivedInstruction(new_conv);
old_to_new_instrs_[convolution] = new_conv;
VLOG(1) << "Space-to-batched convolution " << new_conv->ToString();
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
original_conv_dims.output_batch_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
original_conv_dims.output_feature_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
original_conv_dims.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
instr_to_dim_map_[convolution] = dim_map;
instr_to_dim_permute_map_[new_conv] = std::vector<int64_t>(transpose_dims);
convs_to_visit_.erase(convolution);
return absl::OkStatus();
}
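// Concatenation only needs its dimension remapped through the permutation
// recorded for the (already transformed) first operand.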
absl::Status ConvolutionVisitor::PropagateOnConcat(HloInstruction* concat) {
auto first_operand = old_to_new_instrs_[concat->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
const int64_t new_concat_dim =
DimLookUp(permute_dims, concat->concatenate_dimension());
std::vector<HloInstruction*> new_operands(concat->operand_count());
for (int64_t i = 0; i < concat->operand_count(); ++i) {
new_operands[i] = old_to_new_instrs_[concat->mutable_operand(i)];
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_concat,
MakeConcatHlo(new_operands, new_concat_dim, &concat->metadata(),
&concat->frontend_attributes()));
old_to_new_instrs_[concat] = new_concat;
instr_to_dim_map_[concat] =
std::vector<int64_t>(instr_to_dim_map_[concat->mutable_operand(0)]);
instr_to_dim_permute_map_[new_concat] =
std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
return absl::OkStatus();
}
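// Reverse propagates by remapping each reversed dimension through the
// operand's permutation.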
absl::Status ConvolutionVisitor::PropagateOnReverse(HloInstruction* reverse) {
auto first_operand = old_to_new_instrs_[reverse->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
std::vector<int64_t> new_reverse_dimensions(reverse->dimensions().size());
int dim_count = 0;
for (auto dim : reverse->dimensions()) {
new_reverse_dimensions[dim_count++] = DimLookUp(permute_dims, dim);
}
TF_ASSIGN_OR_RETURN(HloInstruction * new_reverse,
MakeReverseHlo(first_operand, new_reverse_dimensions));
old_to_new_instrs_[reverse] = new_reverse;
instr_to_dim_map_[reverse] =
std::vector<int64_t>(instr_to_dim_map_[reverse->mutable_operand(0)]);
instr_to_dim_permute_map_[new_reverse] =
std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
return absl::OkStatus();
}
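// Pad propagates by rebuilding the padding config in the permuted dimension
// order of the transformed operand.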
absl::Status ConvolutionVisitor::PropagateOnPad(HloInstruction* pad) {
auto first_operand = old_to_new_instrs_[pad->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
PaddingConfig padding_config;
for (int i = 0; i < pad->shape().rank(); ++i) {
auto dimension = padding_config.add_dimensions();
const int64_t old_dim = ReverseDimLookUp(permute_dims, i);
auto old_padding = pad->padding_config().dimensions(old_dim);
dimension->set_edge_padding_low(old_padding.edge_padding_low());
dimension->set_edge_padding_high(old_padding.edge_padding_high());
dimension->set_interior_padding(old_padding.interior_padding());
}
HloInstruction* padding = pad->mutable_operand(1);
TF_ASSIGN_OR_RETURN(auto new_pad,
MakePadHlo(first_operand, padding, padding_config,
&first_operand->metadata(),
&first_operand->frontend_attributes()));
old_to_new_instrs_[pad] = new_pad;
instr_to_dim_map_[pad] =
std::vector<int64_t>(instr_to_dim_map_[pad->mutable_operand(0)]);
instr_to_dim_permute_map_[new_pad] =
std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
return absl::OkStatus();
}
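// Slice propagates by translating starts/limits/strides into the permuted
// space; dimensions that were not actually sliced take the operand's full
// (space-to-batched) extent instead.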
absl::Status ConvolutionVisitor::PropagateOnSlice(HloInstruction* slice) {
auto operand = old_to_new_instrs_[slice->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[operand];
DimensionVector starts(slice->shape().rank());
DimensionVector limits(slice->shape().rank());
DimensionVector strides(slice->shape().rank());
for (int i = 0; i < slice->shape().rank(); ++i) {
const int64_t old_dim = ReverseDimLookUp(permute_dims, i);
if (slice->shape().dimensions(old_dim) ==
slice->operand(0)->shape().dimensions(old_dim)) {
starts[i] = 0;
strides[i] = 1;
limits[i] = operand->shape().dimensions(i);
continue;
}
starts[i] = slice->slice_starts(old_dim);
strides[i] = slice->slice_strides(old_dim);
limits[i] = slice->slice_limits(old_dim);
}
TF_ASSIGN_OR_RETURN(
auto new_slice,
MakeSliceHlo(operand, starts, limits, strides, &operand->metadata(),
&operand->frontend_attributes()));
old_to_new_instrs_[slice] = new_slice;
instr_to_dim_map_[slice] =
std::vector<int64_t>(instr_to_dim_map_[slice->mutable_operand(0)]);
instr_to_dim_permute_map_[new_slice] =
std::vector<int64_t>(instr_to_dim_permute_map_[operand]);
return absl::OkStatus();
}
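// After a spatial split, the split factors sit interleaved with the split
// spatial dimensions. When several dimensions were split, a transpose first
// gathers all split factors next to the batch dimension; a reshape then
// collapses batch and the split factors into one enlarged batch dimension.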
absl::StatusOr<HloInstruction*> ConvolutionVisitor::TransposeAndMergeBatch(
HloInstruction* activations,
absl::Span<const int64_t> final_split_spatial_dim_positioning,
int64_t activations_batch_dim, int64_t old_batch_size) {
const int64_t spatial_dim_count = final_split_spatial_dim_positioning.size();
if (final_split_spatial_dim_positioning.size() > 1) {
int64_t start_batch_dim_position = activations_batch_dim + 1;
int64_t start_space_dim_position =
start_batch_dim_position + spatial_dim_count;
std::vector<int64_t> trans_dims(activations->shape().dimensions_size());
absl::c_iota(trans_dims, 0);
for (int i = 0; i < spatial_dim_count; ++i) {
trans_dims[start_batch_dim_position + i] =
start_batch_dim_position + (spatial_dim_count - 1 - i) * 2;
trans_dims[start_space_dim_position + i] =
start_batch_dim_position + i * 2 + 1;
}
TF_ASSIGN_OR_RETURN(activations, MakeTransposeHlo(activations, trans_dims));
}
std::vector<int64_t> batch_collapse_reshape_dims(
activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
const int64_t collapsed_batch_size =
old_batch_size * IPow<int64_t>(ctrl_.number_of_splits, spatial_dim_count);
batch_collapse_reshape_dims.erase(
batch_collapse_reshape_dims.begin() + activations_batch_dim,
batch_collapse_reshape_dims.begin() + activations_batch_dim +
spatial_dim_count);
batch_collapse_reshape_dims[activations_batch_dim] = collapsed_batch_size;
TF_ASSIGN_OR_RETURN(HloInstruction * batch_collapsed_reshape,
MakeReshapeHlo(batch_collapse_reshape_dims, activations));
return batch_collapsed_reshape;
}
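// Reshapes each chosen spatial dimension from (num_splits *
// spatial_split_size) into a (num_splits, spatial_split_size) pair, then
// merges all split factors into the batch dimension.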
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PerformSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t spatial_split_size,
int64_t num_splits) {
const int64_t old_batch_size =
activations->shape().dimensions(activations_batch_dim);
std::vector<int64_t> reshape_dimensions(
activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
reshape_dimensions[spatial_dimension_to_split] = spatial_split_size;
}
int counter = 0;
for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
reshape_dimensions.insert(
reshape_dimensions.begin() + (spatial_dimension_to_split + counter),
num_splits);
counter++;
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_increased_reshape,
MakeReshapeHlo(reshape_dimensions, activations));
return TransposeAndMergeBatch(
batch_increased_reshape,
spatial_dimensions_to_split,
activations_batch_dim, old_batch_size);
}
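// Applies the requested low/high zero-padding to the chosen spatial
// dimensions and then performs the actual space-to-batch split.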
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PadAndSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits) {
const int64_t old_batch_size =
activations->shape().dimensions(activations_batch_dim);
if (high_padding || low_padding) {
PaddingConfig padding_config =
MakeNoPaddingConfig(activations->shape().dimensions_size());
for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
padding_config.mutable_dimensions(spatial_dimension_to_split)
->set_edge_padding_high(high_padding);
padding_config.mutable_dimensions(spatial_dimension_to_split)
->set_edge_padding_low(low_padding);
}
HloInstruction* padding = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(activations->shape().element_type())),
&activations->metadata(), &activations->frontend_attributes());
TF_ASSIGN_OR_RETURN(activations,
MakePadHlo(activations, padding, padding_config,
&activations->metadata(),
&activations->frontend_attributes()));
}
VLOG(1) << "Initial padded activations shape "
<< activations->shape().ToString() << " old_batch_size "
<< old_batch_size << " activations_batch_dim "
<< activations_batch_dim;
return PerformSplitSpace(activations, spatial_dimensions_to_split,
activations_batch_dim, spatial_split_size,
num_splits);
}
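// Full split pipeline: first transpose the operand so the space dimensions
// sit next to batch, then pad and split. Returns the new instruction along
// with the transpose permutation that was applied.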
absl::StatusOr<std::pair<HloInstruction*, std::vector<int64_t>>>
ConvolutionVisitor::SplitSpace(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits,
std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop,
bool is_rhs) {
TF_ASSIGN_OR_RETURN(
auto retval,
BringSpaceNextToBatch(activations, dim_numbers, activations_batch_dim,
spatial_dimensions_to_split, is_backprop, is_rhs));
activations = retval.instr;
std::vector<int64_t> transpose_dims = retval.transpose_dims;
TF_ASSIGN_OR_RETURN(
auto new_activations,
PadAndSplitSpace(activations, *spatial_dimensions_to_split,
activations_batch_dim, high_padding, low_padding,
spatial_split_size, num_splits));
return std::make_pair(new_activations, transpose_dims);
}
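// Rewrites a constant-like consumer so it matches an already space-to-batched
// producer: apply the inverse of the producer's permutation, then pad and
// split its space dimension identically.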
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PropagateOnConstant(
HloInstruction* consumer, HloInstruction* producer) {
CHECK(old_to_new_instrs_.contains(producer));
HloInstruction* new_producer = old_to_new_instrs_[producer];
auto prod_transpose_dims = instr_to_dim_permute_map_[new_producer];
std::vector<int64_t> reversed_transpose_dims(prod_transpose_dims.size());
for (int64_t i = 0; i < prod_transpose_dims.size(); ++i) {
reversed_transpose_dims[i] = ReverseDimLookUp(prod_transpose_dims, i);
}
TF_ASSIGN_OR_RETURN(consumer,
MakeTransposeHlo(consumer, reversed_transpose_dims));
auto retval = GetSpatialDimsToSplit(producer);
std::vector<int64_t> old_spatial_dims = retval.first;
std::vector<int64_t> new_spatial_dims = retval.second;
auto dim_map = instr_to_dim_map_[producer];
const int64_t old_batch_dim = dim_map[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = old_spatial_dims[0];
const int64_t new_batch_dim = DimLookUp(prod_transpose_dims, old_batch_dim);
const int64_t new_space_dim = new_spatial_dims[0];
const int64_t old_batch_size = producer->shape().dimensions(old_batch_dim);
const int64_t new_batch_size = old_batch_size * ctrl_.number_of_splits;
const int64_t high_padding =
(new_batch_size * new_producer->shape().dimensions(new_space_dim) -
old_batch_size * producer->shape().dimensions(old_space_dim)) /
old_batch_size;
auto new_consumer = PadAndSplitSpace(
consumer, new_spatial_dims, new_batch_dim, high_padding,
0, new_producer->shape().dimensions(new_space_dim),
ctrl_.number_of_splits);
return new_consumer;
}
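// Handles a backprop-filter convolution. Whichever side (activations or
// kernel) has not been space-to-batched yet is transformed locally so both
// sides agree. The split batch then plays the role of the contracted feature
// dimension, and the halo overlaps are materialized as shifted copies
// concatenated along a brand-new spatial dimension.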
absl::Status ConvolutionVisitor::PropagateOnBackpropFilterConv(
HloInstruction* convolution) {
auto activations_old = convolution->mutable_operand(0);
const int64_t rhs_dilation =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation();
auto original_conv_dims = convolution->convolution_dimension_numbers();
std::vector<int64_t> old_split_spatial_dims(
ctrl_.dimension_from_end_to_convert),
old_split_kernel_spatial_dims(ctrl_.dimension_from_end_to_convert);
for (int i = 0; i < ctrl_.dimension_from_end_to_convert; ++i) {
old_split_spatial_dims[i] = original_conv_dims.input_spatial_dimensions(
GetFirstChosenSpatialDim(convolution) + i);
old_split_kernel_spatial_dims[i] =
original_conv_dims.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution) + i);
}
auto kernel_old = convolution->mutable_operand(1);
const int64_t old_kernel_split_dim_size =
kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]);
int64_t old_split_dim_size =
activations_old->shape().dimensions(old_split_spatial_dims[0]);
int64_t old_batch_dim = original_conv_dims.input_feature_dimension();
int64_t kernel_old_batch_dim =
original_conv_dims.kernel_input_feature_dimension();
const int64_t old_batch_size =
activations_old->shape().dimensions(old_batch_dim);
CHECK(old_to_new_instrs_.contains(kernel_old) ||
old_to_new_instrs_.contains(activations_old));
HloInstruction* activations_new = nullptr;
HloInstruction* kernel_new = nullptr;
bool activations_locally_space_to_batched = false;
bool kernel_locally_space_to_batched = false;
std::vector<int64_t> permute_dims_kernel, permute_dims;
if (old_to_new_instrs_.contains(activations_old)) {
activations_new = old_to_new_instrs_[activations_old];
permute_dims = instr_to_dim_permute_map_[activations_new];
}
if (old_to_new_instrs_.contains(kernel_old)) {
kernel_new = old_to_new_instrs_[kernel_old];
permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];
}
if (!old_to_new_instrs_.contains(activations_old)) {
kernel_new = old_to_new_instrs_[kernel_old];
permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];
VLOG(1) << "Space-to-batching activations to enable space-to-depth";
const int64_t new_kernel_space_dim =
DimLookUp(permute_dims_kernel, old_split_kernel_spatial_dims[0]);
const int64_t new_kernel_split_dim_size =
kernel_new->shape().dimensions(new_kernel_space_dim);
const int64_t needed_spatial_size =
rhs_dilation * new_kernel_split_dim_size;
const int64_t pad_size =
needed_spatial_size * ctrl_.number_of_splits - old_split_dim_size;
ConvolutionDimensionNumbers tmp_dim_numbers;
tmp_dim_numbers = original_conv_dims;
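    // Locally space-to-batch the activations so they line up with the already
    // transformed kernel. Per SplitSpace's parameter list above, the bare
    // arguments are low_padding = 0 and is_backprop = true.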
TF_ASSIGN_OR_RETURN(
auto retval, SplitSpace(activations_old, tmp_dim_numbers, old_batch_dim,
pad_size, 0,
needed_spatial_size, ctrl_.number_of_splits,
&old_split_spatial_dims,
true));
activations_new = retval.first;
std::vector<int64_t> reversed_transpose_dims(retval.second.size());
for (int64_t i = 0; i < retval.second.size(); ++i) {
reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);
}
permute_dims = reversed_transpose_dims;
VLOG(3) << "New Activations " << retval.first->ToString();
activations_locally_space_to_batched = true;
} else if (!old_to_new_instrs_.contains(kernel_old)) {
activations_new = old_to_new_instrs_[activations_old];
permute_dims = instr_to_dim_permute_map_[activations_new];
VLOG(1) << "Space-to-batching kernel to enable space-to-depth";
const int64_t new_space_dim =
DimLookUp(permute_dims, old_split_spatial_dims[0]);
const int64_t new_split_dim_size =
activations_new->shape().dimensions(new_space_dim);
const int64_t needed_spatial_size =
CeilOfRatio(new_split_dim_size, rhs_dilation);
int64_t old_kernel_split_dim_size =
kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]);
const int64_t pad_size = needed_spatial_size * ctrl_.number_of_splits -
old_kernel_split_dim_size;
ConvolutionDimensionNumbers tmp_dim_numbers;
tmp_dim_numbers = original_conv_dims;
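    // Symmetric case: space-to-batch the kernel instead. The trailing bare
    // arguments are low_padding = 0, is_backprop = true, and is_rhs = true,
    // matching SplitSpace's parameter list above.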
TF_ASSIGN_OR_RETURN(
auto retval,
SplitSpace(kernel_old, tmp_dim_numbers, kernel_old_batch_dim,
pad_size, 0,
needed_spatial_size, ctrl_.number_of_splits,
&old_split_kernel_spatial_dims,
true, true));
kernel_new = retval.first;
std::vector<int64_t> reversed_transpose_dims(retval.second.size());
for (int64_t i = 0; i < retval.second.size(); ++i) {
reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);
}
permute_dims_kernel = reversed_transpose_dims;
VLOG(3) << "New kernel " << retval.first->ToString();
kernel_locally_space_to_batched = true;
}
CHECK_NE(activations_new, nullptr);
CHECK_NE(kernel_new, nullptr);
const int64_t new_spatial_dimension =
activations_new->shape().dimensions_size();
auto permuted_conv_dims_numbers = original_conv_dims;
int64_t activations_batch_dim =
DimLookUp(permute_dims, original_conv_dims.input_feature_dimension());
int64_t activations_feature_dim =
DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());
const int64_t previous_spatial_dim_count =
original_conv_dims.input_spatial_dimensions_size();
for (int64_t i = 0; i < previous_spatial_dim_count; ++i) {
permuted_conv_dims_numbers.set_input_spatial_dimensions(
i, DimLookUp(permute_dims,
original_conv_dims.input_spatial_dimensions(i)));
permuted_conv_dims_numbers.set_kernel_spatial_dimensions(
i, DimLookUp(permute_dims_kernel,
original_conv_dims.kernel_spatial_dimensions(i)));
}
permuted_conv_dims_numbers.add_input_spatial_dimensions(
new_spatial_dimension);
permuted_conv_dims_numbers.add_kernel_spatial_dimensions(
new_spatial_dimension);
permuted_conv_dims_numbers.add_output_spatial_dimensions(
new_spatial_dimension);
const int64_t previous_chosen_spatial_dim_in_output =
permuted_conv_dims_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
permuted_conv_dims_numbers.set_output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution), new_spatial_dimension);
permuted_conv_dims_numbers.set_output_spatial_dimensions(
previous_spatial_dim_count, previous_chosen_spatial_dim_in_output);
const int64_t kernel_input_feature_dim = DimLookUp(
permute_dims_kernel, original_conv_dims.kernel_input_feature_dimension());
const int64_t kernel_output_feature_dim =
DimLookUp(permute_dims_kernel,
original_conv_dims.kernel_output_feature_dimension());
permuted_conv_dims_numbers.set_kernel_input_feature_dimension(
kernel_input_feature_dim);
permuted_conv_dims_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dim);
std::vector<int64_t> spatial_dimensions_to_split(
ctrl_.count_of_dimensions_to_convert);
const int64_t first_dim_to_split = GetFirstChosenSpatialDim(convolution);
for (int64_t i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
spatial_dimensions_to_split[i] =
permuted_conv_dims_numbers.input_spatial_dimensions(first_dim_to_split +
i);
}
const int64_t kernel_spatial_dimension_to_split =
permuted_conv_dims_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
int64_t new_split_dim_size =
activations_new->shape().dimensions(spatial_dimensions_to_split[0]);
const int64_t kernel_new_split_dim_size =
kernel_new->shape().dimensions(kernel_spatial_dimension_to_split);
permuted_conv_dims_numbers.set_input_batch_dimension(activations_feature_dim);
permuted_conv_dims_numbers.set_input_feature_dimension(activations_batch_dim);
VLOG(1) << "Propagating on conv activations_batch_dim "
<< activations_batch_dim << " spatial_dimension_to_split "
<< spatial_dimensions_to_split[0] << " old_batch_size "
<< old_batch_size << " new_split_dim_size " << new_split_dim_size;
TF_ASSIGN_OR_RETURN(
auto retval,
BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,
activations_batch_dim, &spatial_dimensions_to_split,
true));
int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];
std::vector<int64_t> transpose_dims = retval.transpose_dims;
CHECK(!transpose_dims.empty());
activations_new = retval.instr;
VLOG(1) << "Activations_new post BringSpaceNextToBatch "
<< activations_new->ToString();
VLOG(1) << "activations_batch_dim " << activations_batch_dim
<< " activations_feature_dim " << activations_feature_dim;
const int64_t expected_split_dim_size =
rhs_dilation * kernel_new_split_dim_size;
if (new_split_dim_size != expected_split_dim_size) {
CHECK_LT(new_split_dim_size, expected_split_dim_size);
new_split_dim_size = expected_split_dim_size;
TF_ASSIGN_OR_RETURN(
activations_new,
ChangeSpatialSizeOnSpaceToBatchedShape(
activations_new, activations_batch_dim, old_batch_size,
spatial_dimensions_to_split, new_split_dim_size, true));
}
spatial_dimension_to_split = spatial_dimensions_to_split[0];
auto select_val = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(activations_new->shape().element_type())),
&activations_new->metadata(), &activations_new->frontend_attributes());
if (!activations_locally_space_to_batched) {
TF_ASSIGN_OR_RETURN(
activations_new,
SelectValidPortion(activations_new, activations_old, select_val,
activations_batch_dim, spatial_dimensions_to_split,
old_batch_dim, old_split_spatial_dims));
}
if (!kernel_locally_space_to_batched) {
VLOG(3) << "Selecting the valid kernel area";
std::vector<int64_t> new_kernel_split_spatial_dims(
ctrl_.dimension_from_end_to_convert);
new_kernel_split_spatial_dims[0] = kernel_spatial_dimension_to_split;
TF_ASSIGN_OR_RETURN(
kernel_new,
SelectValidPortion(kernel_new, kernel_old, select_val,
kernel_input_feature_dim,
new_kernel_split_spatial_dims,
original_conv_dims.kernel_input_feature_dimension(),
old_split_kernel_spatial_dims));
}
auto new_dim_numbers = permuted_conv_dims_numbers;
VLOG(2) << "New dim numbers " << new_dim_numbers.DebugString();
const int64_t inherent_low_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_low();
const int64_t inherent_high_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_high();
std::vector<HloInstruction*> activations_chunks;
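  // Materialize the halo overlaps as explicit shifted copies. In the
  // HaloDuplicateWithSlice calls below, the two trailing integers are the low
  // padding and the halo size (matching its other call sites); each
  // low-padding chunk shifts the window one step further.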
for (int64_t i = 0; i < inherent_low_padding; ++i) {
HloInstruction* activations_to_use = nullptr;
if (i == 0) {
activations_to_use = activations_new;
} else {
activations_to_use = activations_chunks.back();
}
TF_ASSIGN_OR_RETURN(
HloInstruction * activations_slice,
HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,
activations_batch_dim, 1,
0));
activations_chunks.push_back(activations_slice);
}
absl::c_reverse(activations_chunks);
const int64_t expanded_kernel =
old_kernel_split_dim_size * rhs_dilation - (rhs_dilation - 1);
const int64_t overlap_count =
old_split_dim_size - expanded_kernel + 1 +
(inherent_low_padding < 0 ? inherent_low_padding : 0) +
(inherent_high_padding < 0 ? inherent_high_padding : 0);
VLOG(1) << "overlap_count " << overlap_count << " inherent_low_padding "
<< inherent_low_padding << " inherent_high_padding "
<< inherent_high_padding;
const int64_t total_overlap_count =
overlap_count + (inherent_low_padding > 0 ? inherent_low_padding : 0) +
(inherent_high_padding > 0 ? inherent_high_padding : 0);
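  // One chunk per valid kernel position: the first chunk may need extra
  // shifting when the inherent low padding is negative, and every later chunk
  // shifts by -1 relative to its predecessor.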
for (int64_t i = 0; i < overlap_count; ++i) {
HloInstruction* activations_to_use = nullptr;
HloInstruction* activations_slice = nullptr;
if (i == 0) {
activations_to_use = activations_new;
if (inherent_low_padding < 0) {
TF_ASSIGN_OR_RETURN(
activations_slice,
HaloDuplicateWithSlice(
activations_to_use, spatial_dimensions_to_split,
activations_batch_dim,
inherent_low_padding, 0));
} else {
activations_slice = activations_to_use;
}
} else {
activations_to_use = activations_chunks.back();
TF_ASSIGN_OR_RETURN(activations_slice,
HaloDuplicateWithSlice(
activations_to_use, spatial_dimensions_to_split,
activations_batch_dim, -1,
0));
}
activations_chunks.push_back(activations_slice);
}
int64_t high_padding_to_materialize = 0;
if (inherent_high_padding > 0) {
high_padding_to_materialize =
std::max(total_overlap_count -
(std::max(overlap_count, static_cast<int64_t>(0)) +
std::max(inherent_low_padding, static_cast<int64_t>(0))),
static_cast<int64_t>(0));
}
for (int64_t i = 0; i < high_padding_to_materialize; ++i) {
HloInstruction* activations_to_use = nullptr;
activations_to_use = activations_chunks.back();
TF_ASSIGN_OR_RETURN(
HloInstruction * activations_slice,
HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,
activations_batch_dim,
-1, 0));
activations_chunks.push_back(activations_slice);
}
for (int64_t i = 0; i < activations_chunks.size(); ++i) {
std::vector<int64_t> input_sizes(
activations_chunks[i]->shape().dimensions().begin(),
activations_chunks[i]->shape().dimensions().end());
input_sizes.push_back(1);
TF_ASSIGN_OR_RETURN(activations_chunks[i],
MakeReshapeHlo(input_sizes, activations_chunks[i]));
VLOG(1) << "new_spatial_dimension " << new_spatial_dimension << " slice "
<< activations_chunks[i]->ToString();
}
TF_ASSIGN_OR_RETURN(
activations_new,
MakeConcatHlo(absl::MakeSpan(activations_chunks), new_spatial_dimension,
&activations_old->metadata(),
&activations_old->frontend_attributes()));
std::vector<int64_t> kernel_sizes(kernel_new->shape().dimensions().begin(),
kernel_new->shape().dimensions().end());
kernel_sizes.push_back(1);
TF_ASSIGN_OR_RETURN(kernel_new, MakeReshapeHlo(kernel_sizes, kernel_new));
auto new_window = convolution->window();
new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
->set_padding_high(-(rhs_dilation - 1));
new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
->set_padding_low(0);
new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
->set_size(CeilOfRatio(new_split_dim_size, rhs_dilation));
auto window_dim = new_window.add_dimensions();
window_dim->set_base_dilation(1);
window_dim->set_size(1);
int64_t stride = 1;
if (inherent_low_padding > total_overlap_count) {
stride = activations_chunks.size();
}
window_dim->set_stride(stride);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
TF_ASSIGN_OR_RETURN(
HloInstruction * new_conv,
MakeConvolveHlo(
activations_new, kernel_new, convolution->feature_group_count(),
convolution->batch_group_count(), new_window, new_dim_numbers,
convolution->precision_config(),
convolution->shape().element_type()));
convolution->SetupDerivedInstruction(new_conv);
VLOG(2) << "New backprop filter convolution " << new_conv->ToString();
std::vector<int64_t> output_sizes(new_conv->shape().dimensions().begin(),
new_conv->shape().dimensions().end());
output_sizes.erase(output_sizes.begin() +
new_dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution)));
TF_ASSIGN_OR_RETURN(new_conv, MakeReshapeHlo(output_sizes, new_conv));
old_to_new_instrs_[convolution] = new_conv;
VLOG(1) << "Space-to-featured convolution " << new_conv->ToString();
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
original_conv_dims.output_batch_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
original_conv_dims.output_feature_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
original_conv_dims.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
instr_to_dim_map_[convolution] = dim_map;
std::vector<int64_t> trans_dims(convolution->shape().dimensions_size());
absl::c_iota(trans_dims, 0);
instr_to_dim_permute_map_[new_conv] = trans_dims;
return absl::OkStatus();
}
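// Depth-bounded search through the users of instr for a reduce-window or
// select-and-scatter, looking through convolutions, pads, transposes, and
// dots along the way.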
HloInstruction*
ConvolutionVisitor::DoesConvolutionFeedReduceWindowOrSelectAndScatter(
HloInstruction* instr, int64_t depth = kReduceWindowSearchDepth) {
if (depth == 0) {
return nullptr;
}
for (auto user : instr->users()) {
if (user->opcode() == HloOpcode::kReduceWindow ||
user->opcode() == HloOpcode::kSelectAndScatter) {
return user;
}
if (user->opcode() == HloOpcode::kConvolution ||
user->opcode() == HloOpcode::kPad ||
user->opcode() == HloOpcode::kTranspose ||
user->opcode() == HloOpcode::kDot) {
continue;
}
auto ret =
DoesConvolutionFeedReduceWindowOrSelectAndScatter(user, depth - 1);
if (ret != nullptr) {
return ret;
}
}
return nullptr;
}
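// Memoized, depth-bounded check for whether any transitive user is an op
// that propagation cannot handle. Convolutions and dots consume one unit of
// search depth.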
bool ConvolutionVisitor::DoesConvolutionFeedUnpropagatableOp(
HloInstruction* instr, int64_t depth) {
auto key = std::make_pair(instr, depth);
if (unpropagatability_cache_.contains(key)) {
return unpropagatability_cache_[key];
}
if (depth == 0 || instr->user_count() == 0) {
unpropagatability_cache_[key] = false;
return false;
}
for (auto user : instr->users()) {
if (IsOpcodeNonPropagatable(user)) {
unpropagatability_cache_[key] = true;
return true;
}
int64_t depth_to_use = depth;
if (user->opcode() == HloOpcode::kConvolution ||
user->opcode() == HloOpcode::kDot) {
depth_to_use--;
}
if (DoesConvolutionFeedUnpropagatableOp(user, depth_to_use)) {
unpropagatability_cache_[key] = true;
return true;
}
}
unpropagatability_cache_[key] = false;
return false;
}
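// A reduce-window or select-and-scatter can only be propagated through if
// each split spatial slice is at least as large as its window.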
bool ConvolutionVisitor::IsSpaceToBatchedSpaceSizeSuitable(
HloInstruction* instr) {
CHECK(instr->opcode() == HloOpcode::kSelectAndScatter ||
instr->opcode() == HloOpcode::kReduceWindow);
auto old_producer = instr->mutable_operand(0);
auto dim_map_val_op = instr_to_dim_map_[old_producer];
const int64_t old_space_dim =
dim_map_val_op[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto first_operand = old_to_new_instrs_[old_producer];
auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
const int64_t new_space_dim =
DimLookUp(permute_dims_first_operand, old_space_dim);
const int64_t window_size = instr->window().dimensions(old_space_dim).size();
if (first_operand->shape().dimensions(new_space_dim) < window_size) {
return false;
}
return true;
}
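// Gathers the per-convolution quantities the pass needs: stride, inherent
// padding, dilation factors, the (dilation-expanded) kernel size, the
// effective spatial size, and the halo each split slice must borrow from its
// neighbor.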
ConvolutionVisitor::ConvDetails ConvolutionVisitor::GetConvolutionDetails(
HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {
auto activations = convolution->mutable_operand(0);
auto kernel = convolution->mutable_operand(1);
const auto& kernel_shape = kernel->shape();
const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
int64_t kernel_spatial_dim_size = kernel_shape.dimensions(kernel_spatial_dim);
if (IsForwardWindowDilatedConv(convolution, dim_numbers)) {
const int64_t window_dilation_factor =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation();
kernel_spatial_dim_size =
(kernel_spatial_dim_size - 1) * (window_dilation_factor - 1) +
kernel_spatial_dim_size;
}
std::vector<int64_t> spatial_dimensions_to_split =
GetChosenSpatialDims(convolution);
const int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];
const int64_t input_dim_size =
activations->shape().dimensions(spatial_dimension_to_split);
const int64_t inherent_low_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_low();
const int64_t inherent_high_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_high();
const int64_t stride = convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.stride();
const int64_t base_dilation_factor =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.base_dilation();
bool is_base_dilated = base_dilation_factor > 1;
const int64_t spatial_size = input_dim_size +
(is_base_dilated ? 0 : inherent_low_padding) +
inherent_high_padding;
const int64_t last_overlap = base_dilation_factor == inherent_low_padding
? kernel_spatial_dim_size
: kernel_spatial_dim_size - 1;
const int64_t halo_size = is_base_dilated
? last_overlap / base_dilation_factor
: kernel_spatial_dim_size - 1;
const int64_t high_padding_for_base_dilation =
inherent_low_padding == 0 ? base_dilation_factor - 1
: last_overlap % base_dilation_factor;
const int64_t high_padding_for_conv =
is_base_dilated ? high_padding_for_base_dilation : 0;
const int64_t low_padding_for_conv =
is_base_dilated && (base_dilation_factor != inherent_low_padding)
? inherent_low_padding
: 0;
return ConvDetails{spatial_dimensions_to_split,
inherent_low_padding,
inherent_high_padding,
stride,
spatial_size,
base_dilation_factor,
halo_size,
high_padding_for_conv,
low_padding_for_conv,
kernel_spatial_dim_size,
input_dim_size};
}
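// Entry point for transforming a convolution. Bails out when the spatial
// extent is too small to split profitably; otherwise picks a split size so
// every batch slice produces a whole number of output offsets (and stays
// compatible with any downstream reduce-window stride), splits space into
// batch, duplicates halos, builds the new convolution, masks out the invalid
// output region, records the dimension mappings, and propagates to users.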
absl::Status ConvolutionVisitor::PerformSpaceToBatchOnConvolution(
HloInstruction* convolution) {
if (!ConsumeFuel("space-to-batch-converter", [&] {
return "Skipping space-to-batch propagation because fuel over\n";
})) {
return absl::OkStatus();
}
VLOG(1) << "Handling conv " << convolution->ToString();
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);
int64_t activations_batch_dim = dim_numbers.input_batch_dimension();
auto activations = convolution->mutable_operand(0);
VLOG(1) << "spatial size " << c.spatial_size;
if (c.spatial_size < 2 * ctrl_.number_of_splits) {
return absl::OkStatus();
}
auto original_conv = convolution;
const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
const int64_t output_offsets =
convolution->shape().dimensions(output_spatial_dim);
const int64_t output_offsets_per_split =
CeilOfRatio(output_offsets, ctrl_.number_of_splits);
int64_t spatial_split_size =
CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;
while (spatial_split_size * ctrl_.number_of_splits - c.spatial_size < 0) {
spatial_split_size += c.stride;
}
auto reduce_window_or_select_and_scatter =
DoesConvolutionFeedReduceWindowOrSelectAndScatter(convolution);
if (reduce_window_or_select_and_scatter != nullptr &&
reduce_window_or_select_and_scatter->shape().IsArray() &&
reduce_window_or_select_and_scatter->shape().rank() ==
convolution->shape().rank()) {
VLOG(2)
<< "DoesConvolutionFeedReduceWindowOrSelectAndScatter returned true";
const int64_t win_stride =
std::max(reduce_window_or_select_and_scatter->window()
.dimensions(output_spatial_dim)
.stride(),
static_cast<int64_t>(1));
CHECK_NE(win_stride, 0)
<< "Bad op " << reduce_window_or_select_and_scatter->ToString();
CHECK_NE(c.stride, 0) << "Bad op " << convolution->ToString();
while ((spatial_split_size / c.stride) % win_stride != 0) {
spatial_split_size += c.stride;
}
}
const int64_t slice_size = spatial_split_size + c.halo_size;
const int64_t low_pad_to_handle_base_dilation =
(c.base_dilation_factor > 1 &&
c.base_dilation_factor == c.inherent_low_padding)
? 1
: 0;
int64_t pad_size =
spatial_split_size * ctrl_.number_of_splits - c.spatial_size;
bool handle_low_pad_in_first_reshape = false;
if (pad_size > low_pad_to_handle_base_dilation) {
pad_size -= low_pad_to_handle_base_dilation;
handle_low_pad_in_first_reshape = true;
}
VLOG(1) << "spatial_split_size " << spatial_split_size << " stride "
<< c.stride << " slice_size " << slice_size;
VLOG(1) << "spatial_dimension_to_split " << c.spatial_dimensions_to_split[0]
<< " num_splits " << ctrl_.number_of_splits
<< " kernel_spatial_dim_size " << c.kernel_spatial_dim_size;
std::vector<int64_t> spatial_dimensions_to_split =
c.spatial_dimensions_to_split;
TF_ASSIGN_OR_RETURN(
auto retval,
SplitSpace(
activations, dim_numbers, activations_batch_dim,
c.inherent_high_padding + pad_size,
c.base_dilation_factor == 1 ? c.inherent_low_padding
: handle_low_pad_in_first_reshape ? low_pad_to_handle_base_dilation
: 0,
spatial_split_size, ctrl_.number_of_splits,
&spatial_dimensions_to_split));
HloInstruction* batch_increased_reshape = retval.first;
convolution->SetupDerivedInstruction(batch_increased_reshape);
VLOG(1) << "First reshape done " << batch_increased_reshape->ToString();
TF_ASSIGN_OR_RETURN(
activations,
HaloDuplicateWithSlice(
batch_increased_reshape, spatial_dimensions_to_split,
activations_batch_dim,
handle_low_pad_in_first_reshape ? 0 : low_pad_to_handle_base_dilation,
c.halo_size));
VLOG(1) << "Batch merge done " << activations->ToString();
auto new_dim_numbers = dim_numbers;
const int64_t rank = convolution->shape().rank();
std::vector<int64_t> transpose_dims(rank);
int dim_count = 0;
std::map<int64_t, int64_t> dim_translator;
for (int j = 0; j < dim_numbers.output_spatial_dimensions_size(); ++j) {
if (j == GetFirstChosenSpatialDim(convolution)) {
dim_translator[dim_numbers.output_batch_dimension()] = dim_count;
new_dim_numbers.set_output_batch_dimension(dim_count++);
}
dim_translator[dim_numbers.output_spatial_dimensions(j)] = dim_count;
new_dim_numbers.set_output_spatial_dimensions(j, dim_count);
dim_count++;
}
dim_translator[dim_numbers.output_feature_dimension()] = dim_count;
new_dim_numbers.set_output_feature_dimension(dim_count);
int p = 0;
for (const auto& entry : dim_translator) {
transpose_dims[p] = entry.second;
p++;
}
VLOG(1) << "New dim numbers " << new_dim_numbers.DebugString()
<< " batch dim " << new_dim_numbers.input_batch_dimension();
auto new_window = convolution->window();
const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
new_window.mutable_dimensions(first_dim + i)
->set_padding_high(c.high_padding_for_conv);
new_window.mutable_dimensions(first_dim + i)
->set_padding_low(c.low_padding_for_conv);
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_conv,
MakeConvolveHlo(
activations, convolution->mutable_operand(1),
convolution->feature_group_count(), convolution->batch_group_count(),
new_window, new_dim_numbers, convolution->precision_config(),
convolution->shape().element_type(),
&convolution->metadata(), &convolution->frontend_attributes()));
convolution->SetupDerivedInstruction(new_conv);
batch_to_space_map_[convolution->mutable_operand(0)] =
convolution->mutable_operand(0);
VLOG(1) << "Space-to-batched convolution " << new_conv->ToString();
std::vector<int64_t> new_output_split_spatial_dims(
ctrl_.count_of_dimensions_to_convert),
old_output_split_spatial_dims(ctrl_.count_of_dimensions_to_convert);
for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
old_output_split_spatial_dims[i] =
dim_numbers.output_spatial_dimensions(first_dim + i);
new_output_split_spatial_dims[i] =
new_dim_numbers.output_spatial_dimensions(first_dim + i);
}
const int64_t output_batch_dim = new_dim_numbers.output_batch_dimension();
auto select_val = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(new_conv->shape().element_type())),
&convolution->metadata(), &convolution->frontend_attributes());
TF_ASSIGN_OR_RETURN(
new_conv,
SelectValidPortion(new_conv, original_conv, select_val, output_batch_dim,
new_output_split_spatial_dims,
dim_numbers.output_batch_dimension(),
old_output_split_spatial_dims));
old_to_new_instrs_[original_conv] = new_conv;
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
dim_numbers.output_batch_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
dim_numbers.output_feature_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
instr_to_dim_map_[original_conv] = dim_map;
instr_to_dim_permute_map_[new_conv] = std::vector<int64_t>(transpose_dims);
if (non_propagatable_instrs_.count(convolution) > 0) {
non_propagatable_instrs_.erase(convolution);
}
TF_CHECK_OK(PropagateOnUsers(original_conv));
return absl::OkStatus();
}
}
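// Runs the visitor over every non-fusion computation of the module and
// reports whether anything changed.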
absl::StatusOr<bool> SpaceToBatchConverter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "SpaceToBatchConverter::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
ConvolutionVisitor visitor(ctrl_, comp);
if (visitor.Run().value()) {
changed = true;
}
VLOG(1) << "Done operating on computation";
}
XLA_VLOG_LINES(2,
"SpaceToBatchConverter::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/space_to_batch_converter.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
using SpaceToBatchConverterTest = HloTestBase;
namespace op = testing::opcode_matchers;
TEST_F(SpaceToBatchConverterTest, SimpleBatch1) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
ROOT %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 1);
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch1ConvXpose) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
ROOT tr = bf16[1,256,256,32] transpose(%convolution), dimensions={0,2,1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0), op::Select());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch1WithReduceWindow) {
std::string hlo_string = R"(
HloModule module
adder (lhs: bf16[], rhs: bf16[]) -> bf16[] {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
%constant = bf16[3] constant({1.0, 2.0, 3.0})
%tuple = (bf16[1,256,256,32], bf16[3])tuple(%convolution, %constant)
ROOT %gte = bf16[1,256,256,32] get-tuple-element(%tuple), index=0
%gte2 = bf16[3]get-tuple-element(%tuple), index=1
%init = bf16[] constant(1.0)
%reduce-window = bf16[3] reduce-window(bf16[3] %gte2, bf16[] %init),
window={size=1}, to_apply=%adder
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch2) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[2,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
ROOT %convolution = bf16[2,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 1});
ASSERT_FALSE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, UnpropagatableOp) {
std::string hlo_string = R"(
HloModule module
ENTRY comp {
%reduce-window = bf16[1,76,76,64]{3,2,1,0} parameter(0)
%convert.13 = bf16[3,3,64,64]{3,2,1,0} parameter(1)
%convolution.1 = bf16[64,76,76,1]{0,2,1,3} convolution(
%reduce-window, %convert.13), window={size=3x3 pad=1_1x1_1},
dim_labels=b01f_01io->f01b
ROOT custom-call.5079 = bf16[64,152,152,1]{0,2,1,3} custom-call(%convolution.1),
custom_call_target="ResizeNearest"
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 1});
ASSERT_FALSE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, Batch1WithStrideAndPad) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,224,224,3]{3,2,1,0} parameter(0)
%p1 = bf16[7,7,3,64]{3,2,1,0} parameter(1)
ROOT %convolution.3 = bf16[1,112,112,64]{3,2,1,0} convolution(%p0, %p1),
window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 4});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}
TEST_F(SpaceToBatchConverterTest, Batch1WithBaseDilation) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p2 = bf16[1,28,28,128]{3,0,2,1} parameter(0)
%p3 = bf16[1,1,512,128]{3,2,1,0} parameter(1)
ROOT %c = bf16[1,56,56,512]{3,0,2,1} convolution(%p2, %p3),
window={size=1x1 pad=0_1x0_1 lhs_dilate=2x2 rhs_reversal=1x1},
dim_labels=b01f_01oi->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}
TEST_F(SpaceToBatchConverterTest, PropagateThroughDot) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
%p2 = bf16[32,32] parameter(2)
ROOT %dot.5010 = bf16[1,256,256,32] dot(%convolution, %p2),
lhs_contracting_dims={3},
rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, PropagateOnTrivialReduce) {
std::string hlo_string = R"(
HloModule module
%region_1.37 (Arg_0.38: f32[], Arg_1.39: f32[]) -> f32[] {
%Arg_0.38 = f32[] parameter(0)
%Arg_1.39 = f32[] parameter(1)
ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)
}
ENTRY computation {
%p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)
%p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)
%c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),
window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f
%constant.5 = f32[] constant(0)
ROOT %reduce.41 = f32[7,160,400]{2,1,0} reduce(%c, %constant.5), dimensions={3}, to_apply=%region_1.37
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0)->operand(0)->operand(0)->operand(0),
op::Reduce());
auto new_reduce = root->operand(0)->operand(0)->operand(0)->operand(0);
EXPECT_EQ(new_reduce->shape().dimensions(1),
7 * 8);
}
TEST_F(SpaceToBatchConverterTest, DoNotPropagateOnTupleReduce) {
std::string hlo_string = R"(
HloModule module
%minmax_func.2717 {
%lhs_value.2718 = f32[] parameter(0)
%rhs_value.2720 = f32[] parameter(2)
%compare.2722 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=GE
%select.2723 = f32[] select(pred[] %compare.2722, f32[] %lhs_value.2718, f32[] %rhs_value.2720)
%compare.2725 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=EQ
%lhs_index.2719 = f32[] parameter(1)
%rhs_index.2721 = f32[] parameter(3)
%minimum.2726 = f32[] minimum(f32[] %lhs_index.2719, f32[] %rhs_index.2721)
%select.2724 = f32[] select(pred[] %compare.2722, f32[] %lhs_index.2719, f32[] %rhs_index.2721)
%select.2727 = f32[] select(pred[] %compare.2725, f32[] %minimum.2726, f32[] %select.2724)
ROOT %tuple.4 = (f32[], f32[]) tuple(f32[] %select.2723, f32[] %select.2727)
}
ENTRY computation {
%p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)
%p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)
%c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),
window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f
%constant.5 = f32[] constant(0)
%constant.6 = f32[] constant(1)
ROOT %reduce.36 = (f32[7,160,400]{2,1,0}, f32[7,160,400]{2,1,0}) reduce(%c, %c,
%constant.5, %constant.6), dimensions={3}, to_apply=%minmax_func.2717
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Reduce());
}
TEST_F(SpaceToBatchConverterTest, ReduceDegenerateDim) {
std::string hlo_string = R"(
HloModule module
%region_42.4982 {
%Arg_0.38 = f32[] parameter(0)
%Arg_1.39 = f32[] parameter(1)
ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)
}
ENTRY computation {
%p0 = f32[2,1,84,84,3]{4,3,2,1,0} parameter(0)
%p1 = f32[3,3,3,3,32]{4,3,2,1,0} parameter(1)
%constant.10559 = f32[] constant(0)
%convolution.98 = f32[2,1,84,84,32]{4,3,2,1,0} convolution(%p0, %p1),
window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
ROOT %reduce.2606 = f32[2,84,84]{2,1,0} reduce(f32[2,1,84,84,32]{4,3,2,1,0}
%convolution.98, f32[] %constant.10559), dimensions={1,4}, to_apply=%region_42.4982
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
}
TEST_F(SpaceToBatchConverterTest, PropagateOnReduce) {
std::string hlo_string = R"(
HloModule xla_computation_unknown.14
region_0.134 {
Arg_0.135 = f32[] parameter(0)
Arg_1.136 = f32[] parameter(1)
ROOT add.137 = f32[] add(Arg_0.135, Arg_1.136)
}
ENTRY main.140 {
p0 = bf16[1,512,32,128]{3,2,1,0} parameter(0)
p1 = f32[3,3,128,128]{3,2,1,0} parameter(1)
%convolution.755 = f32[1,512,32,128]{3,2,1,0}
convolution(p0, p1),
window={size=3x3 pad=1_1x1_1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
%constant.19458 = f32[] constant(0)
ROOT %reduce.1354 = f32[128]{0} reduce(%convolution.755, %constant.19458),
dimensions={0,1,2}, to_apply=%region_0.134
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Reduce());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
70cab951-f568-4125-8238-ab87fb041758 | cpp | google/cel-cpp | overflow | internal/overflow.cc | internal/overflow_test.cc | #include "internal/overflow.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "internal/status_macros.h"
#include "internal/time.h"
namespace cel::internal {
namespace {
constexpr int64_t kInt32Max = std::numeric_limits<int32_t>::max();
constexpr int64_t kInt32Min = std::numeric_limits<int32_t>::lowest();
constexpr int64_t kInt64Max = std::numeric_limits<int64_t>::max();
constexpr int64_t kInt64Min = std::numeric_limits<int64_t>::lowest();
constexpr uint64_t kUint32Max = std::numeric_limits<uint32_t>::max();
ABSL_ATTRIBUTE_UNUSED constexpr uint64_t kUint64Max =
std::numeric_limits<uint64_t>::max();
constexpr uint64_t kUintToIntMax = static_cast<uint64_t>(kInt64Max);
constexpr double kDoubleToIntMax = static_cast<double>(kInt64Max);
constexpr double kDoubleToIntMin = static_cast<double>(kInt64Min);
const double kDoubleTwoTo64 = std::ldexp(1.0, 64);
const absl::Duration kOneSecondDuration = absl::Seconds(1);
const int64_t kOneSecondNanos = absl::ToInt64Nanoseconds(kOneSecondDuration);
const int64_t kMinUnixTime =
absl::ToInt64Seconds(MinTimestamp() - absl::UnixEpoch());
const int64_t kMaxUnixTime =
absl::ToInt64Seconds(MaxTimestamp() - absl::UnixEpoch());
absl::Status CheckRange(bool valid_expression,
absl::string_view error_message) {
return valid_expression ? absl::OkStatus()
: absl::OutOfRangeError(error_message);
}
absl::Status CheckArgument(bool valid_expression,
absl::string_view error_message) {
return valid_expression ? absl::OkStatus()
: absl::InvalidArgumentError(error_message);
}
bool IsFinite(absl::Duration d) {
return d != absl::InfiniteDuration() && d != -absl::InfiniteDuration();
}
bool IsFinite(absl::Time t) {
return t != absl::InfiniteFuture() && t != absl::InfinitePast();
}
}
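// Signed 64-bit helpers: use the compiler's overflow builtins when available
// and fall back to explicit range checks otherwise.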
absl::StatusOr<int64_t> CheckedAdd(int64_t x, int64_t y) {
#if ABSL_HAVE_BUILTIN(__builtin_add_overflow)
int64_t sum;
if (!__builtin_add_overflow(x, y, &sum)) {
return sum;
}
return absl::OutOfRangeError("integer overflow");
#else
CEL_RETURN_IF_ERROR(CheckRange(
y > 0 ? x <= kInt64Max - y : x >= kInt64Min - y, "integer overflow"));
return x + y;
#endif
}
absl::StatusOr<int64_t> CheckedSub(int64_t x, int64_t y) {
#if ABSL_HAVE_BUILTIN(__builtin_sub_overflow)
int64_t diff;
if (!__builtin_sub_overflow(x, y, &diff)) {
return diff;
}
return absl::OutOfRangeError("integer overflow");
#else
CEL_RETURN_IF_ERROR(CheckRange(
y < 0 ? x <= kInt64Max + y : x >= kInt64Min + y, "integer overflow"));
return x - y;
#endif
}
absl::StatusOr<int64_t> CheckedNegation(int64_t v) {
#if ABSL_HAVE_BUILTIN(__builtin_mul_overflow)
int64_t prod;
if (!__builtin_mul_overflow(v, -1, &prod)) {
return prod;
}
return absl::OutOfRangeError("integer overflow");
#else
CEL_RETURN_IF_ERROR(CheckRange(v != kInt64Min, "integer overflow"));
return -v;
#endif
}
absl::StatusOr<int64_t> CheckedMul(int64_t x, int64_t y) {
#if ABSL_HAVE_BUILTIN(__builtin_mul_overflow)
int64_t prod;
if (!__builtin_mul_overflow(x, y, &prod)) {
return prod;
}
return absl::OutOfRangeError("integer overflow");
#else
CEL_RETURN_IF_ERROR(
CheckRange(!((x == -1 && y == kInt64Min) || (y == -1 && x == kInt64Min) ||
(x > 0 && y > 0 && x > kInt64Max / y) ||
(x < 0 && y < 0 && x < kInt64Max / y) ||
(x > 0 && y < 0 && y < kInt64Min / x) ||
(x < 0 && y > 0 && x < kInt64Min / y)),
"integer overflow"));
return x * y;
#endif
}
absl::StatusOr<int64_t> CheckedDiv(int64_t x, int64_t y) {
CEL_RETURN_IF_ERROR(
CheckRange(x != kInt64Min || y != -1, "integer overflow"));
CEL_RETURN_IF_ERROR(CheckArgument(y != 0, "divide by zero"));
return x / y;
}
absl::StatusOr<int64_t> CheckedMod(int64_t x, int64_t y) {
CEL_RETURN_IF_ERROR(
CheckRange(x != kInt64Min || y != -1, "integer overflow"));
CEL_RETURN_IF_ERROR(CheckArgument(y != 0, "modulus by zero"));
return x % y;
}
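// Unsigned 64-bit counterparts; overflow reports kOutOfRange while division
// or modulus by zero reports kInvalidArgument.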
absl::StatusOr<uint64_t> CheckedAdd(uint64_t x, uint64_t y) {
#if ABSL_HAVE_BUILTIN(__builtin_add_overflow)
uint64_t sum;
if (!__builtin_add_overflow(x, y, &sum)) {
return sum;
}
return absl::OutOfRangeError("unsigned integer overflow");
#else
CEL_RETURN_IF_ERROR(
CheckRange(x <= kUint64Max - y, "unsigned integer overflow"));
return x + y;
#endif
}
absl::StatusOr<uint64_t> CheckedSub(uint64_t x, uint64_t y) {
#if ABSL_HAVE_BUILTIN(__builtin_sub_overflow)
uint64_t diff;
if (!__builtin_sub_overflow(x, y, &diff)) {
return diff;
}
return absl::OutOfRangeError("unsigned integer overflow");
#else
CEL_RETURN_IF_ERROR(CheckRange(y <= x, "unsigned integer overflow"));
return x - y;
#endif
}
absl::StatusOr<uint64_t> CheckedMul(uint64_t x, uint64_t y) {
#if ABSL_HAVE_BUILTIN(__builtin_mul_overflow)
uint64_t prod;
if (!__builtin_mul_overflow(x, y, &prod)) {
return prod;
}
return absl::OutOfRangeError("unsigned integer overflow");
#else
CEL_RETURN_IF_ERROR(
CheckRange(y == 0 || x <= kUint64Max / y, "unsigned integer overflow"));
return x * y;
#endif
}
absl::StatusOr<uint64_t> CheckedDiv(uint64_t x, uint64_t y) {
CEL_RETURN_IF_ERROR(CheckArgument(y != 0, "divide by zero"));
return x / y;
}
absl::StatusOr<uint64_t> CheckedMod(uint64_t x, uint64_t y) {
CEL_RETURN_IF_ERROR(CheckArgument(y != 0, "modulus by zero"));
return x % y;
}
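// Duration arithmetic is performed on the int64 nanosecond representation,
// so infinite durations are rejected up front.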
absl::StatusOr<absl::Duration> CheckedAdd(absl::Duration x, absl::Duration y) {
CEL_RETURN_IF_ERROR(
CheckRange(IsFinite(x) && IsFinite(y), "integer overflow"));
CEL_ASSIGN_OR_RETURN(int64_t nanos, CheckedAdd(absl::ToInt64Nanoseconds(x),
absl::ToInt64Nanoseconds(y)));
return absl::Nanoseconds(nanos);
}
absl::StatusOr<absl::Duration> CheckedSub(absl::Duration x, absl::Duration y) {
CEL_RETURN_IF_ERROR(
CheckRange(IsFinite(x) && IsFinite(y), "integer overflow"));
CEL_ASSIGN_OR_RETURN(int64_t nanos, CheckedSub(absl::ToInt64Nanoseconds(x),
absl::ToInt64Nanoseconds(y)));
return absl::Nanoseconds(nanos);
}
absl::StatusOr<absl::Duration> CheckedNegation(absl::Duration v) {
CEL_RETURN_IF_ERROR(CheckRange(IsFinite(v), "integer overflow"));
CEL_ASSIGN_OR_RETURN(int64_t nanos,
CheckedNegation(absl::ToInt64Nanoseconds(v)));
return absl::Nanoseconds(nanos);
}
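// Adds a duration to a time by splitting both into whole seconds and
// sub-second nanoseconds, normalizing the nanosecond carry into [0s, 1s),
// and range-checking the result against the supported timestamp bounds.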
absl::StatusOr<absl::Time> CheckedAdd(absl::Time t, absl::Duration d) {
CEL_RETURN_IF_ERROR(
CheckRange(IsFinite(t) && IsFinite(d), "timestamp overflow"));
const int64_t s1 = absl::ToUnixSeconds(t);
const int64_t ns1 = (t - absl::FromUnixSeconds(s1)) / absl::Nanoseconds(1);
const int64_t s2 = d / kOneSecondDuration;
const int64_t ns2 = absl::ToInt64Nanoseconds(d % kOneSecondDuration);
CEL_ASSIGN_OR_RETURN(int64_t s, CheckedAdd(s1, s2));
absl::Duration ns = absl::Nanoseconds(ns2 + ns1);
if (ns < absl::ZeroDuration() || ns >= kOneSecondDuration) {
CEL_ASSIGN_OR_RETURN(s, CheckedAdd(s, ns / kOneSecondDuration));
ns -= (ns / kOneSecondDuration) * kOneSecondDuration;
if (ns < absl::ZeroDuration()) {
CEL_ASSIGN_OR_RETURN(s, CheckedAdd(s, -1));
ns += kOneSecondDuration;
}
}
CEL_RETURN_IF_ERROR(
CheckRange(s >= kMinUnixTime && s <= kMaxUnixTime, "timestamp overflow"));
return absl::FromUnixSeconds(s) + ns;
}
absl::StatusOr<absl::Time> CheckedSub(absl::Time t, absl::Duration d) {
CEL_ASSIGN_OR_RETURN(auto neg_duration, CheckedNegation(d));
return CheckedAdd(t, neg_duration);
}
absl::StatusOr<absl::Duration> CheckedSub(absl::Time t1, absl::Time t2) {
CEL_RETURN_IF_ERROR(
CheckRange(IsFinite(t1) && IsFinite(t2), "integer overflow"));
const int64_t s1 = absl::ToUnixSeconds(t1);
const int64_t ns1 = (t1 - absl::FromUnixSeconds(s1)) / absl::Nanoseconds(1);
const int64_t s2 = absl::ToUnixSeconds(t2);
const int64_t ns2 = (t2 - absl::FromUnixSeconds(s2)) / absl::Nanoseconds(1);
CEL_ASSIGN_OR_RETURN(int64_t s, CheckedSub(s1, s2));
absl::Duration ns = absl::Nanoseconds(ns1 - ns2);
CEL_ASSIGN_OR_RETURN(const int64_t t, CheckedMul(s, kOneSecondNanos));
CEL_ASSIGN_OR_RETURN(const int64_t v,
CheckedAdd(t, absl::ToInt64Nanoseconds(ns)));
return absl::Nanoseconds(v);
}
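// Checked narrowing/widening conversions. The double bounds are exclusive
// because e.g. static_cast<double>(kInt64Max) rounds up to 2^63, which is
// itself out of int64_t range.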
absl::StatusOr<int64_t> CheckedDoubleToInt64(double v) {
CEL_RETURN_IF_ERROR(
CheckRange(std::isfinite(v) && v < kDoubleToIntMax && v > kDoubleToIntMin,
"double out of int64_t range"));
return static_cast<int64_t>(v);
}
absl::StatusOr<uint64_t> CheckedDoubleToUint64(double v) {
CEL_RETURN_IF_ERROR(
CheckRange(std::isfinite(v) && v >= 0 && v < kDoubleTwoTo64,
"double out of uint64_t range"));
return static_cast<uint64_t>(v);
}
absl::StatusOr<uint64_t> CheckedInt64ToUint64(int64_t v) {
CEL_RETURN_IF_ERROR(CheckRange(v >= 0, "int64 out of uint64_t range"));
return static_cast<uint64_t>(v);
}
absl::StatusOr<int32_t> CheckedInt64ToInt32(int64_t v) {
CEL_RETURN_IF_ERROR(
CheckRange(v >= kInt32Min && v <= kInt32Max, "int64 out of int32_t range"));
return static_cast<int32_t>(v);
}
absl::StatusOr<int64_t> CheckedUint64ToInt64(uint64_t v) {
CEL_RETURN_IF_ERROR(
CheckRange(v <= kUintToIntMax, "uint64 out of int64_t range"));
return static_cast<int64_t>(v);
}
absl::StatusOr<uint32_t> CheckedUint64ToUint32(uint64_t v) {
CEL_RETURN_IF_ERROR(
CheckRange(v <= kUint32Max, "uint64 out of uint32_t range"));
return static_cast<uint32_t>(v);
}
} | #include "internal/overflow.h"
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
using ::testing::HasSubstr;
using ::testing::ValuesIn;
template <typename T>
struct TestCase {
std::string test_name;
absl::FunctionRef<absl::StatusOr<T>()> op;
absl::StatusOr<T> result;
};
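// Runs the operation under test and verifies the status code plus either the
// value (on success) or a substring of the error message (on failure).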
template <typename T>
void ExpectResult(const T& test_case) {
auto result = test_case.op();
ASSERT_EQ(result.status().code(), test_case.result.status().code());
if (result.ok()) {
EXPECT_EQ(*result, *test_case.result);
} else {
EXPECT_THAT(result.status().message(),
HasSubstr(test_case.result.status().message()));
}
}
using IntTestCase = TestCase<int64_t>;
using CheckedIntResultTest = testing::TestWithParam<IntTestCase>;
TEST_P(CheckedIntResultTest, IntOperations) { ExpectResult(GetParam()); }
INSTANTIATE_TEST_SUITE_P(
CheckedIntMathTest, CheckedIntResultTest,
ValuesIn(std::vector<IntTestCase>{
{"OneAddOne", [] { return CheckedAdd(1L, 1L); }, 2L},
{"ZeroAddOne", [] { return CheckedAdd(0, 1L); }, 1L},
{"ZeroAddMinusOne", [] { return CheckedAdd(0, -1L); }, -1L},
{"OneAddZero", [] { return CheckedAdd(1L, 0); }, 1L},
{"MinusOneAddZero", [] { return CheckedAdd(-1L, 0); }, -1L},
{"OneAddIntMax",
[] { return CheckedAdd(1L, std::numeric_limits<int64_t>::max()); },
absl::OutOfRangeError("integer overflow")},
{"MinusOneAddIntMin",
[] { return CheckedAdd(-1L, std::numeric_limits<int64_t>::lowest()); },
absl::OutOfRangeError("integer overflow")},
{"TwoSubThree", [] { return CheckedSub(2L, 3L); }, -1L},
{"TwoSubZero", [] { return CheckedSub(2L, 0); }, 2L},
{"ZeroSubTwo", [] { return CheckedSub(0, 2L); }, -2L},
{"MinusTwoSubThree", [] { return CheckedSub(-2L, 3L); }, -5L},
{"MinusTwoSubZero", [] { return CheckedSub(-2L, 0); }, -2L},
{"ZeroSubMinusTwo", [] { return CheckedSub(0, -2L); }, 2L},
{"IntMinSubIntMax",
[] {
return CheckedSub(std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::lowest());
},
absl::OutOfRangeError("integer overflow")},
{"TwoMulThree", [] { return CheckedMul(2L, 3L); }, 6L},
{"MinusTwoMulThree", [] { return CheckedMul(-2L, 3L); }, -6L},
{"MinusTwoMulMinusThree", [] { return CheckedMul(-2L, -3L); }, 6L},
{"TwoMulMinusThree", [] { return CheckedMul(2L, -3L); }, -6L},
{"TwoMulIntMax",
[] { return CheckedMul(2L, std::numeric_limits<int64_t>::max()); },
absl::OutOfRangeError("integer overflow")},
{"MinusOneMulIntMin",
[] { return CheckedMul(-1L, std::numeric_limits<int64_t>::lowest()); },
absl::OutOfRangeError("integer overflow")},
{"IntMinMulMinusOne",
[] { return CheckedMul(std::numeric_limits<int64_t>::lowest(), -1L); },
absl::OutOfRangeError("integer overflow")},
{"IntMinMulZero",
[] { return CheckedMul(std::numeric_limits<int64_t>::lowest(), 0); },
0},
{"ZeroMulIntMin",
[] { return CheckedMul(0, std::numeric_limits<int64_t>::lowest()); },
0},
{"IntMaxMulZero",
[] { return CheckedMul(std::numeric_limits<int64_t>::max(), 0); }, 0},
{"ZeroMulIntMax",
[] { return CheckedMul(0, std::numeric_limits<int64_t>::max()); }, 0},
{"ZeroDivOne", [] { return CheckedDiv(0, 1L); }, 0},
{"TenDivTwo", [] { return CheckedDiv(10L, 2L); }, 5},
{"TenDivMinusOne", [] { return CheckedDiv(10L, -1L); }, -10},
{"MinusTenDivMinusOne", [] { return CheckedDiv(-10L, -1L); }, 10},
{"MinusTenDivTwo", [] { return CheckedDiv(-10L, 2L); }, -5},
{"OneDivZero", [] { return CheckedDiv(1L, 0L); },
absl::InvalidArgumentError("divide by zero")},
{"IntMinDivMinusOne",
[] { return CheckedDiv(std::numeric_limits<int64_t>::lowest(), -1L); },
absl::OutOfRangeError("integer overflow")},
{"ZeroModTwo", [] { return CheckedMod(0, 2L); }, 0},
{"TwoModTwo", [] { return CheckedMod(2L, 2L); }, 0},
{"ThreeModTwo", [] { return CheckedMod(3L, 2L); }, 1L},
{"TwoModZero", [] { return CheckedMod(2L, 0); },
absl::InvalidArgumentError("modulus by zero")},
{"IntMinModTwo",
[] { return CheckedMod(std::numeric_limits<int64_t>::lowest(), 2L); },
0},
{"IntMaxModMinusOne",
[] { return CheckedMod(std::numeric_limits<int64_t>::max(), -1L); },
0},
{"IntMinModMinusOne",
[] { return CheckedMod(std::numeric_limits<int64_t>::lowest(), -1L); },
absl::OutOfRangeError("integer overflow")},
{"NegateOne", [] { return CheckedNegation(1L); }, -1L},
{"NegateMinInt64",
[] { return CheckedNegation(std::numeric_limits<int64_t>::lowest()); },
absl::OutOfRangeError("integer overflow")},
{"Uint64Conversion", [] { return CheckedUint64ToInt64(1UL); }, 1L},
{"Uint32MaxConversion",
[] {
return CheckedUint64ToInt64(
static_cast<uint64_t>(std::numeric_limits<int64_t>::max()));
},
std::numeric_limits<int64_t>::max()},
{"Uint32MaxConversionError",
[] {
return CheckedUint64ToInt64(
static_cast<uint64_t>(std::numeric_limits<uint64_t>::max()));
},
absl::OutOfRangeError("out of int64_t range")},
{"DoubleConversion", [] { return CheckedDoubleToInt64(100.1); }, 100L},
{"DoubleInt64MaxConversionError",
[] {
return CheckedDoubleToInt64(
static_cast<double>(std::numeric_limits<int64_t>::max()));
},
absl::OutOfRangeError("out of int64_t range")},
{"DoubleInt64MaxMinus512Conversion",
[] {
return CheckedDoubleToInt64(
static_cast<double>(std::numeric_limits<int64_t>::max() - 512));
},
std::numeric_limits<int64_t>::max() - 1023},
{"DoubleInt64MaxMinus1024Conversion",
[] {
return CheckedDoubleToInt64(
static_cast<double>(std::numeric_limits<int64_t>::max() - 1024));
},
std::numeric_limits<int64_t>::max() - 1023},
{"DoubleInt64MinConversionError",
[] {
return CheckedDoubleToInt64(
static_cast<double>(std::numeric_limits<int64_t>::lowest()));
},
absl::OutOfRangeError("out of int64_t range")},
{"DoubleInt64MinMinusOneConversionError",
[] {
return CheckedDoubleToInt64(
static_cast<double>(std::numeric_limits<int64_t>::lowest()) -
1.0);
},
absl::OutOfRangeError("out of int64_t range")},
{"DoubleInt64MinMinus511ConversionError",
[] {
return CheckedDoubleToInt64(
static_cast<double>(std::numeric_limits<int64_t>::lowest()) -
511.0);
},
absl::OutOfRangeError("out of int64_t range")},
{"InfiniteConversionError",
[] {
return CheckedDoubleToInt64(std::numeric_limits<double>::infinity());
},
absl::OutOfRangeError("out of int64_t range")},
{"NegRangeConversionError",
[] { return CheckedDoubleToInt64(-1.0e99); },
absl::OutOfRangeError("out of int64_t range")},
{"PosRangeConversionError", [] { return CheckedDoubleToInt64(1.0e99); },
absl::OutOfRangeError("out of int64_t range")},
}),
[](const testing::TestParamInfo<CheckedIntResultTest::ParamType>& info) {
return info.param.test_name;
});
using UintTestCase = TestCase<uint64_t>;
using CheckedUintResultTest = testing::TestWithParam<UintTestCase>;
TEST_P(CheckedUintResultTest, UnsignedOperations) { ExpectResult(GetParam()); }
INSTANTIATE_TEST_SUITE_P(
CheckedUintMathTest, CheckedUintResultTest,
ValuesIn(std::vector<UintTestCase>{
{"OneAddOne", [] { return CheckedAdd(1UL, 1UL); }, 2UL},
{"ZeroAddOne", [] { return CheckedAdd(0, 1UL); }, 1UL},
{"OneAddZero", [] { return CheckedAdd(1UL, 0); }, 1UL},
{"OneAddIntMax",
[] { return CheckedAdd(1UL, std::numeric_limits<uint64_t>::max()); },
absl::OutOfRangeError("unsigned integer overflow")},
{"OneSubOne", [] { return CheckedSub(1UL, 1UL); }, 0},
{"ZeroSubOne", [] { return CheckedSub(0, 1UL); },
absl::OutOfRangeError("unsigned integer overflow")},
{"OneSubZero", [] { return CheckedSub(1UL, 0); }, 1UL},
{"OneMulOne", [] { return CheckedMul(1UL, 1UL); }, 1UL},
{"ZeroMulOne", [] { return CheckedMul(0, 1UL); }, 0},
{"OneMulZero", [] { return CheckedMul(1UL, 0); }, 0},
{"TwoMulUintMax",
[] { return CheckedMul(2UL, std::numeric_limits<uint64_t>::max()); },
absl::OutOfRangeError("unsigned integer overflow")},
{"TwoDivTwo", [] { return CheckedDiv(2UL, 2UL); }, 1UL},
{"TwoDivFour", [] { return CheckedDiv(2UL, 4UL); }, 0},
{"OneDivZero", [] { return CheckedDiv(1UL, 0); },
absl::InvalidArgumentError("divide by zero")},
{"TwoModTwo", [] { return CheckedMod(2UL, 2UL); }, 0},
{"TwoModFour", [] { return CheckedMod(2UL, 4UL); }, 2UL},
{"OneModZero", [] { return CheckedMod(1UL, 0); },
absl::InvalidArgumentError("modulus by zero")},
{"Int64Conversion", [] { return CheckedInt64ToUint64(1L); }, 1UL},
{"Int64MaxConversion",
[] {
return CheckedInt64ToUint64(std::numeric_limits<int64_t>::max());
},
static_cast<uint64_t>(std::numeric_limits<int64_t>::max())},
{"NegativeInt64ConversionError",
[] { return CheckedInt64ToUint64(-1L); },
absl::OutOfRangeError("out of uint64_t range")},
{"DoubleConversion", [] { return CheckedDoubleToUint64(100.1); },
100UL},
{"DoubleUint64MaxConversionError",
[] {
return CheckedDoubleToUint64(
static_cast<double>(std::numeric_limits<uint64_t>::max()));
},
absl::OutOfRangeError("out of uint64_t range")},
{"DoubleUint64MaxMinus512Conversion",
[] {
return CheckedDoubleToUint64(
static_cast<double>(std::numeric_limits<uint64_t>::max() - 512));
},
absl::OutOfRangeError("out of uint64_t range")},
{"DoubleUint64MaxMinus1024Conversion",
[] {
return CheckedDoubleToUint64(static_cast<double>(
std::numeric_limits<uint64_t>::max() - 1024));
},
std::numeric_limits<uint64_t>::max() - 2047},
{"InfiniteConversionError",
[] {
return CheckedDoubleToUint64(
std::numeric_limits<double>::infinity());
},
absl::OutOfRangeError("out of uint64_t range")},
{"NegConversionError", [] { return CheckedDoubleToUint64(-1.1); },
absl::OutOfRangeError("out of uint64_t range")},
{"NegRangeConversionError",
[] { return CheckedDoubleToUint64(-1.0e99); },
absl::OutOfRangeError("out of uint64_t range")},
{"PosRangeConversionError",
[] { return CheckedDoubleToUint64(1.0e99); },
absl::OutOfRangeError("out of uint64_t range")},
}),
[](const testing::TestParamInfo<CheckedUintResultTest::ParamType>& info) {
return info.param.test_name;
});
using DurationTestCase = TestCase<absl::Duration>;
using CheckedDurationResultTest = testing::TestWithParam<DurationTestCase>;
TEST_P(CheckedDurationResultTest, DurationOperations) {
ExpectResult(GetParam());
}
INSTANTIATE_TEST_SUITE_P(
CheckedDurationMathTest, CheckedDurationResultTest,
ValuesIn(std::vector<DurationTestCase>{
{"OneSecondAddOneSecond",
[] { return CheckedAdd(absl::Seconds(1), absl::Seconds(1)); },
absl::Seconds(2)},
{"MaxDurationAddOneNano",
[] {
return CheckedAdd(
absl::Nanoseconds(std::numeric_limits<int64_t>::max()),
absl::Nanoseconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"MinDurationAddMinusOneNano",
[] {
return CheckedAdd(
absl::Nanoseconds(std::numeric_limits<int64_t>::lowest()),
absl::Nanoseconds(-1));
},
absl::OutOfRangeError("integer overflow")},
{"InfinityAddOneNano",
[] {
return CheckedAdd(absl::InfiniteDuration(), absl::Nanoseconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"NegInfinityAddOneNano",
[] {
return CheckedAdd(-absl::InfiniteDuration(), absl::Nanoseconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"OneSecondAddInfinity",
[] {
return CheckedAdd(absl::Nanoseconds(1), absl::InfiniteDuration());
},
absl::OutOfRangeError("integer overflow")},
{"OneSecondAddNegInfinity",
[] {
return CheckedAdd(absl::Nanoseconds(1), -absl::InfiniteDuration());
},
absl::OutOfRangeError("integer overflow")},
{"OneSecondSubOneSecond",
[] { return CheckedSub(absl::Seconds(1), absl::Seconds(1)); },
absl::ZeroDuration()},
{"MinDurationSubOneSecond",
[] {
return CheckedSub(
absl::Nanoseconds(std::numeric_limits<int64_t>::lowest()),
absl::Nanoseconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"InfinitySubOneNano",
[] {
return CheckedSub(absl::InfiniteDuration(), absl::Nanoseconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"NegInfinitySubOneNano",
[] {
return CheckedSub(-absl::InfiniteDuration(), absl::Nanoseconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"OneNanoSubInfinity",
[] {
return CheckedSub(absl::Nanoseconds(1), absl::InfiniteDuration());
},
absl::OutOfRangeError("integer overflow")},
{"OneNanoSubNegInfinity",
[] {
return CheckedSub(absl::Nanoseconds(1), -absl::InfiniteDuration());
},
absl::OutOfRangeError("integer overflow")},
{"TimeSubOneSecond",
[] {
return CheckedSub(absl::FromUnixSeconds(100),
absl::FromUnixSeconds(1));
},
absl::Seconds(99)},
{"TimeWithNanosPositive",
[] {
return CheckedSub(absl::FromUnixSeconds(2) + absl::Nanoseconds(1),
absl::FromUnixSeconds(1) - absl::Nanoseconds(1));
},
absl::Seconds(1) + absl::Nanoseconds(2)},
{"TimeWithNanosNegative",
[] {
return CheckedSub(absl::FromUnixSeconds(1) + absl::Nanoseconds(1),
absl::FromUnixSeconds(2) + absl::Seconds(1) -
absl::Nanoseconds(1));
},
absl::Seconds(-2) + absl::Nanoseconds(2)},
{"MinTimestampMinusOne",
[] {
return CheckedSub(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::lowest()),
absl::FromUnixSeconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"InfinitePastSubOneSecond",
[] {
return CheckedSub(absl::InfinitePast(), absl::FromUnixSeconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"InfiniteFutureSubOneMinusSecond",
[] {
return CheckedSub(absl::InfiniteFuture(), absl::FromUnixSeconds(-1));
},
absl::OutOfRangeError("integer overflow")},
{"InfiniteFutureSubInfinitePast",
[] {
return CheckedSub(absl::InfiniteFuture(), absl::InfinitePast());
},
absl::OutOfRangeError("integer overflow")},
{"InfinitePastSubInfiniteFuture",
[] {
return CheckedSub(absl::InfinitePast(), absl::InfiniteFuture());
},
absl::OutOfRangeError("integer overflow")},
{"NegateOneSecond", [] { return CheckedNegation(absl::Seconds(1)); },
absl::Seconds(-1)},
{"NegateMinDuration",
[] {
return CheckedNegation(
absl::Nanoseconds(std::numeric_limits<int64_t>::lowest()));
},
absl::OutOfRangeError("integer overflow")},
{"NegateInfiniteDuration",
[] { return CheckedNegation(absl::InfiniteDuration()); },
absl::OutOfRangeError("integer overflow")},
{"NegateNegInfiniteDuration",
[] { return CheckedNegation(-absl::InfiniteDuration()); },
absl::OutOfRangeError("integer overflow")},
}),
[](const testing::TestParamInfo<CheckedDurationResultTest::ParamType>&
info) { return info.param.test_name; });
using TimeTestCase = TestCase<absl::Time>;
using CheckedTimeResultTest = testing::TestWithParam<TimeTestCase>;
TEST_P(CheckedTimeResultTest, TimeDurationOperations) {
ExpectResult(GetParam());
}
INSTANTIATE_TEST_SUITE_P(
CheckedTimeDurationMathTest, CheckedTimeResultTest,
ValuesIn(std::vector<TimeTestCase>{
{"DateAddOneHourMinusOneMilli",
[] {
return CheckedAdd(absl::FromUnixSeconds(3506),
absl::Hours(1) + absl::Milliseconds(-1));
},
absl::FromUnixSeconds(7106) + absl::Milliseconds(-1)},
{"DateAddOneHourOneNano",
[] {
return CheckedAdd(absl::FromUnixSeconds(3506),
absl::Hours(1) + absl::Nanoseconds(1));
},
absl::FromUnixSeconds(7106) + absl::Nanoseconds(1)},
{"MaxIntAddOneSecond",
[] {
return CheckedAdd(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max()),
absl::Seconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"MaxTimestampAddOneSecond",
[] {
return CheckedAdd(absl::FromUnixSeconds(253402300799),
absl::Seconds(1));
},
absl::OutOfRangeError("timestamp overflow")},
{"TimeWithNanosNegative",
[] {
return CheckedAdd(absl::FromUnixSeconds(1) + absl::Nanoseconds(1),
absl::Nanoseconds(-999999999));
},
absl::FromUnixNanos(2)},
{"TimeWithNanosPositive",
[] {
return CheckedAdd(
absl::FromUnixSeconds(1) + absl::Nanoseconds(999999999),
absl::Nanoseconds(999999999));
},
absl::FromUnixSeconds(2) + absl::Nanoseconds(999999998)},
{"SecondsAddInfinity",
[] {
return CheckedAdd(
absl::FromUnixSeconds(1) + absl::Nanoseconds(999999999),
absl::InfiniteDuration());
},
absl::OutOfRangeError("timestamp overflow")},
{"SecondsAddNegativeInfinity",
[] {
return CheckedAdd(
absl::FromUnixSeconds(1) + absl::Nanoseconds(999999999),
-absl::InfiniteDuration());
},
absl::OutOfRangeError("timestamp overflow")},
{"InfiniteFutureAddNegativeInfinity",
[] {
return CheckedAdd(absl::InfiniteFuture(), -absl::InfiniteDuration());
},
absl::OutOfRangeError("timestamp overflow")},
{"InfinitePastAddInfinity",
[] {
return CheckedAdd(absl::InfinitePast(), absl::InfiniteDuration());
},
absl::OutOfRangeError("timestamp overflow")},
{"DateSubOneHour",
[] { return CheckedSub(absl::FromUnixSeconds(3506), absl::Hours(1)); },
absl::FromUnixSeconds(-94)},
{"MinTimestampSubOneSecond",
[] {
return CheckedSub(absl::FromUnixSeconds(-62135596800),
absl::Seconds(1));
},
absl::OutOfRangeError("timestamp overflow")},
{"MinIntSubOneViaNanos",
[] {
return CheckedSub(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::min()),
absl::Nanoseconds(1));
},
absl::OutOfRangeError("integer overflow")},
{"MinTimestampSubOneViaNanosScaleOverflow",
[] {
return CheckedSub(
absl::FromUnixSeconds(-62135596800) + absl::Nanoseconds(1),
absl::Nanoseconds(999999999));
},
absl::OutOfRangeError("timestamp overflow")},
{"SecondsSubInfinity",
[] {
return CheckedSub(
absl::FromUnixSeconds(1) + absl::Nanoseconds(999999999),
absl::InfiniteDuration());
},
absl::OutOfRangeError("integer overflow")},
{"SecondsSubNegInfinity",
[] {
return CheckedSub(
absl::FromUnixSeconds(1) + absl::Nanoseconds(999999999),
-absl::InfiniteDuration());
},
absl::OutOfRangeError("integer overflow")},
}),
[](const testing::TestParamInfo<CheckedTimeResultTest::ParamType>& info) {
return info.param.test_name;
});
using ConvertInt64Int32TestCase = TestCase<int32_t>;
using CheckedConvertInt64Int32Test =
testing::TestWithParam<ConvertInt64Int32TestCase>;
TEST_P(CheckedConvertInt64Int32Test, Conversions) { ExpectResult(GetParam()); }
INSTANTIATE_TEST_SUITE_P(
CheckedConvertInt64Int32Test, CheckedConvertInt64Int32Test,
ValuesIn(std::vector<ConvertInt64Int32TestCase>{
{"SimpleConversion", [] { return CheckedInt64ToInt32(1L); }, 1},
{"Int32MaxConversion",
[] {
return CheckedInt64ToInt32(
static_cast<int64_t>(std::numeric_limits<int32_t>::max()));
},
std::numeric_limits<int32_t>::max()},
{"Int32MaxConversionError",
[] {
return CheckedInt64ToInt32(
static_cast<int64_t>(std::numeric_limits<int64_t>::max()));
},
absl::OutOfRangeError("out of int32_t range")},
{"Int32MinConversion",
[] {
return CheckedInt64ToInt32(
static_cast<int64_t>(std::numeric_limits<int32_t>::lowest()));
},
std::numeric_limits<int32_t>::lowest()},
{"Int32MinConversionError",
[] {
return CheckedInt64ToInt32(
static_cast<int64_t>(std::numeric_limits<int64_t>::lowest()));
},
absl::OutOfRangeError("out of int32_t range")},
}),
[](const testing::TestParamInfo<CheckedConvertInt64Int32Test::ParamType>&
info) { return info.param.test_name; });
using ConvertUint64Uint32TestCase = TestCase<uint32_t>;
using CheckedConvertUint64Uint32Test =
testing::TestWithParam<ConvertUint64Uint32TestCase>;
TEST_P(CheckedConvertUint64Uint32Test, Conversions) {
ExpectResult(GetParam());
}
INSTANTIATE_TEST_SUITE_P(
CheckedConvertUint64Uint32Test, CheckedConvertUint64Uint32Test,
ValuesIn(std::vector<ConvertUint64Uint32TestCase>{
{"SimpleConversion", [] { return CheckedUint64ToUint32(1UL); }, 1U},
{"Uint32MaxConversion",
[] {
return CheckedUint64ToUint32(
static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()));
},
std::numeric_limits<uint32_t>::max()},
{"Uint32MaxConversionError",
[] {
return CheckedUint64ToUint32(
static_cast<uint64_t>(std::numeric_limits<uint64_t>::max()));
},
absl::OutOfRangeError("out of uint32_t range")},
}),
[](const testing::TestParamInfo<CheckedConvertUint64Uint32Test::ParamType>&
info) { return info.param.test_name; });
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/overflow.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/overflow_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
b8d6818f-df0e-4df0-98c7-5f5861ecee80 | cpp | tensorflow/tensorflow | sendrecv_ops | tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc | tensorflow/core/kernels/sendrecv_ops_test.cc | #include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
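// Compiles the XlaSend op into an XLA Send on the channel handle registered
// under `tensor_name`.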
class SendOp : public XlaOpKernel {
public:
explicit SendOp(OpKernelConstruction* ctx);
void Compile(XlaOpKernelContext* ctx) override;
private:
string tensor_name_;
SendOp(const SendOp&) = delete;
void operator=(const SendOp&) = delete;
};
SendOp::SendOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("tensor_name", &tensor_name_));
}
void SendOp::Compile(XlaOpKernelContext* ctx) {
XlaCompiler* compiler = ctx->compiler();
xla::ChannelHandle channel;
OP_REQUIRES_OK(ctx, compiler->GetChannelHandle(tensor_name_, &channel));
xla::Send(ctx->Input(0), channel);
}
REGISTER_XLA_OP(Name("XlaSend"), SendOp);
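// Compiles the XlaRecv op into an XLA Recv producing a value of the
// statically declared shape and dtype.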
class RecvOp : public XlaOpKernel {
public:
explicit RecvOp(OpKernelConstruction* ctx);
void Compile(XlaOpKernelContext* ctx) override;
private:
string tensor_name_;
xla::Shape shape_;
RecvOp(const RecvOp&) = delete;
void operator=(const RecvOp&) = delete;
};
RecvOp::RecvOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("tensor_name", &tensor_name_));
TensorShape tensor_shape;
DataType dtype;
OP_REQUIRES_OK(ctx, ctx->GetAttr("shape", &tensor_shape));
OP_REQUIRES_OK(ctx, ctx->GetAttr("dtype", &dtype));
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, tensor_shape, &shape_));
}
void RecvOp::Compile(XlaOpKernelContext* ctx) {
XlaCompiler* compiler = ctx->compiler();
xla::ChannelHandle channel;
OP_REQUIRES_OK(ctx, compiler->GetChannelHandle(tensor_name_, &channel));
ctx->SetOutput(0, xla::Recv(ctx->builder(), shape_, channel));
}
REGISTER_XLA_OP(Name("XlaRecv"), RecvOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
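// No-op rendezvous so the benchmarks measure kernel overhead only: Send
// discards the tensor and Recv completes immediately with a static empty
// tensor.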
class DummyRendezvous : public Rendezvous {
Status Send(const ParsedKey& key, const Args& args, const Tensor& val,
const bool is_dead) override {
return absl::OkStatus();
}
void RecvAsync(const ParsedKey& key, const Args& args,
DoneCallback done) override {
static Tensor* t = new Tensor(DT_FLOAT, TensorShape({0}));
done(absl::OkStatus(), args, args, *t, false);
}
void StartAbort(const Status& status) override {}
};
static Graph* Send() {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(DT_FLOAT, TensorShape({0}));
test::graph::Send(g, test::graph::Constant(g, in0), "T", "/cpu:0", 1,
"/cpu:0");
test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0");
return g;
}
static Graph* Recv() {
Graph* g = new Graph(OpRegistry::Global());
test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0");
return g;
}
void BM_Send(::testing::benchmark::State& state) {
test::Benchmark("cpu", Send(), nullptr, nullptr, new DummyRendezvous, "",
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_Send)->UseRealTime();
void BM_Recv(::testing::benchmark::State& state) {
test::Benchmark("cpu", Recv(), nullptr, nullptr, new DummyRendezvous, "",
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_Recv)->UseRealTime();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sendrecv_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cd6f7740-592e-4590-b580-e12b3fe549c5 | cpp | google/tensorstore | async_cache | tensorstore/internal/cache/async_cache.cc | tensorstore/internal/cache/async_cache_test.cc | #include "tensorstore/internal/cache/async_cache.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <functional>
#include <mutex>
#include <type_traits>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/batch_impl.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/compare.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
using Entry = AsyncCache::Entry;
using ReadState = AsyncCache::ReadState;
using TransactionNode = AsyncCache::TransactionNode;
using TransactionTree = AsyncCache::Entry::TransactionTree;
using PendingWritebackQueueAccessor =
TransactionNode::PendingWritebackQueueAccessor;
using PrepareForCommitState = TransactionNode::PrepareForCommitState;
constexpr absl::Duration kEpsilonDuration = absl::Nanoseconds(1);
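// An in-flight read pins its entry; for a transaction node it also holds a
// commit block (unless commit already started) plus a node reference until
// the read resolves.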
void AcquireReadRequestReference(Entry& entry) {
internal::PinnedCacheEntry<AsyncCache>(&entry).release();
}
void ReleaseReadRequestReference(Entry& entry) {
internal::PinnedCacheEntry<AsyncCache>(&entry, internal::adopt_object_ref);
}
void AcquireReadRequestReference(TransactionNode& node) {
if (!node.transaction()->commit_started()) {
node.transaction()->AcquireCommitBlock();
}
intrusive_ptr_increment(&node);
}
void ReleaseReadRequestReference(TransactionNode& node) {
if (!node.transaction()->commit_started()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< node << "Releasing commit block";
node.transaction()->ReleaseCommitBlock();
}
intrusive_ptr_decrement(&node);
}
Future<const void> GetFuture(Promise<void>& promise) {
if (!promise.null()) {
auto future = promise.future();
if (!future.null()) return future;
}
auto pair = PromiseFuturePair<void>::Make();
promise = std::move(pair.promise);
return std::move(pair.future);
}
const AsyncCache::ReadRequestState& GetEffectiveReadRequestState(Entry& entry) {
return entry.read_request_state_;
}
const AsyncCache::ReadRequestState& GetEffectiveReadRequestState(
TransactionNode& node) {
return node.reads_committed_ ? GetOwningEntry(node).read_request_state_
: node.read_request_state_;
}
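// Issues the queued read request, if any, for an entry or transaction node:
// promotes it from "queued" to "issued" under the lock and then calls DoRead
// with the lock released.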
template <typename EntryOrNode>
void EntryOrNodeStartRead(EntryOrNode& entry_or_node,
UniqueWriterLock<Entry> lock, Batch::View batch) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
auto& request_state = entry_or_node.read_request_state_;
if (request_state.queued_request_is_deferred) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< entry_or_node << "EntryOrNodeStartRead: no pending read request";
return;
}
if (!request_state.queued.result_needed()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< entry_or_node
<< "EntryOrNodeStartRead: pending read request was cancelled";
request_state.queued = Promise<void>();
request_state.queued_request_is_deferred = true;
request_state.queued_time = absl::InfinitePast();
return;
}
assert(request_state.issued.null());
auto staleness_bound = request_state.issued_time =
std::exchange(request_state.queued_time, absl::InfinitePast());
request_state.issued = std::move(request_state.queued);
request_state.queued_request_is_deferred = true;
lock.unlock();
AcquireReadRequestReference(entry_or_node);
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< entry_or_node << "EntryOrNodeStartRead: calling DoRead";
AsyncCache::AsyncCacheReadRequest read_request;
read_request.staleness_bound = staleness_bound;
read_request.batch = batch;
entry_or_node.DoRead(std::move(read_request));
}
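// Drives the per-entry state machine: walks the committing transaction nodes
// queued on `entry`, calling PrepareDone and (once no read is in flight)
// ReadyForCommit, and otherwise starts any pending read.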
void MaybeStartReadOrWriteback(Entry& entry, UniqueWriterLock<Entry> lock,
Batch::View read_batch) {
auto& read_request_state = entry.read_request_state_;
if (TransactionNode* committing_transaction_node =
entry.committing_transaction_node_) {
TransactionNode* next;
while (true) {
const auto existing_prepare_for_commit_state =
committing_transaction_node->prepare_for_commit_state_.load(
std::memory_order_relaxed);
const bool read_request_issued = !read_request_state.issued.null();
PrepareForCommitState new_prepare_for_commit_state;
switch (existing_prepare_for_commit_state) {
case PrepareForCommitState::kNone:
case PrepareForCommitState::kPrepareDoneCalled:
new_prepare_for_commit_state =
PrepareForCommitState::kPrepareDoneCalled;
if (read_request_issued) break;
[[fallthrough]];
case PrepareForCommitState::kReadyForCommitCalled:
new_prepare_for_commit_state =
PrepareForCommitState::kReadyForCommitCalled;
}
committing_transaction_node->prepare_for_commit_state_ =
new_prepare_for_commit_state;
next =
PendingWritebackQueueAccessor::GetNext(committing_transaction_node);
if (next == committing_transaction_node ||
next->transaction() != committing_transaction_node->transaction() ||
next->prepare_for_commit_state_.load(std::memory_order_relaxed) ==
PrepareForCommitState::kReadyForCommitCalled) {
next = nullptr;
}
lock.unlock();
switch (existing_prepare_for_commit_state) {
case PrepareForCommitState::kNone:
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *committing_transaction_node << "PrepareDone";
committing_transaction_node->PrepareDone();
[[fallthrough]];
case PrepareForCommitState::kPrepareDoneCalled:
if (read_request_issued) return;
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *committing_transaction_node << "ReadyForCommit";
committing_transaction_node->ReadyForCommit();
break;
case PrepareForCommitState::kReadyForCommitCalled:
break;
}
if (!next) return;
committing_transaction_node = next;
lock = UniqueWriterLock<Entry>(entry);
}
}
if (read_request_state.issued.null()) {
EntryOrNodeStartRead(entry, std::move(lock), read_batch);
}
}
void MaybeIssueRead(Entry& entry, UniqueWriterLock<Entry> lock,
Batch::View batch) {
MaybeStartReadOrWriteback(entry, std::move(lock), batch);
}
void MaybeIssueRead(TransactionNode& node, UniqueWriterLock<Entry> lock,
Batch::View batch) {
if (!node.read_request_state_.issued.null()) return;
EntryOrNodeStartRead(node, std::move(lock), batch);
}
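// Installs a newly received read state, redirecting to the owning entry when
// a transaction node's reads have already been committed, and propagates any
// change in the accounted read-state size.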
template <typename EntryOrNode>
void SetReadState(EntryOrNode& entry_or_node, ReadState&& read_state,
size_t read_state_size) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
if constexpr (std::is_same_v<EntryOrNode, TransactionNode>) {
if (entry_or_node.reads_committed_) {
assert(entry_or_node.prepare_for_commit_state_.load(
std::memory_order_relaxed) ==
PrepareForCommitState::kReadyForCommitCalled);
SetReadState(GetOwningEntry(entry_or_node), std::move(read_state),
read_state_size);
return;
}
}
entry_or_node.read_request_state_.known_to_be_stale = false;
entry_or_node.read_request_state_.read_state = std::move(read_state);
size_t change =
read_state_size -
std::exchange(entry_or_node.read_request_state_.read_state_size,
read_state_size);
if (change != 0) {
if constexpr (std::is_same_v<EntryOrNode, TransactionNode>) {
entry_or_node.UpdateSizeInBytes(change);
} else {
entry_or_node.NotifySizeChanged();
}
}
}
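// Batch entry that defers a queued read until the batch is submitted; the
// read is only issued if its promise still matches the queued request.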
template <typename EntryOrNode>
class AsyncCacheBatchEntry : public Batch::Impl::Entry {
public:
using EntryOrNodePtr =
std::conditional_t<std::is_same_v<EntryOrNode, AsyncCache::Entry>,
PinnedCacheEntry<AsyncCache>,
OpenTransactionNodePtr<AsyncCache::TransactionNode>>;
using KeyParam = internal_future::FutureStateBase*;
explicit AsyncCacheBatchEntry(size_t nesting_depth,
EntryOrNode& entry_or_node,
Promise<void> promise)
: Batch::Impl::Entry(nesting_depth),
entry_or_node_(&entry_or_node),
promise_(std::move(promise)) {}
KeyParam key() const { return &internal_future::FutureAccess::rep(promise_); }
private:
void Submit(Batch::View batch) override {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *entry_or_node_ << "Submitting batch read";
auto& entry = GetOwningEntry(*entry_or_node_);
UniqueWriterLock lock(entry);
auto& read_request_state = entry_or_node_->read_request_state_;
if (!HaveSameSharedState(read_request_state.queued, promise_)) {
return;
}
read_request_state.queued_request_is_deferred = false;
MaybeIssueRead(*entry_or_node_, std::move(lock), batch);
delete this;
}
EntryOrNodePtr entry_or_node_;
Promise<void> promise_;
};
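// Shared implementation behind Entry::Read and TransactionNode::Read:
// satisfies the request from cached data when fresh enough, otherwise
// coalesces it with any issued or queued read, optionally deferring it to a
// batch.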
template <typename EntryOrNode>
Future<const void> RequestRead(EntryOrNode& entry_or_node,
AsyncCache::AsyncCacheReadRequest options,
bool must_not_be_known_to_be_stale) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
auto& entry = GetOwningEntry(entry_or_node);
UniqueWriterLock lock(entry);
auto& effective_request_state = GetEffectiveReadRequestState(entry_or_node);
const auto existing_time = effective_request_state.read_state.stamp.time;
if (existing_time != absl::InfinitePast() &&
existing_time >= options.staleness_bound) {
if (must_not_be_known_to_be_stale &&
effective_request_state.known_to_be_stale) {
options.staleness_bound = existing_time + kEpsilonDuration;
} else {
return MakeReadyFuture();
}
}
auto& request_state = entry_or_node.read_request_state_;
request_state.queued_time =
std::max(request_state.queued_time,
std::min(options.staleness_bound, absl::Now()));
if (!request_state.issued.null() &&
request_state.issued_time >= options.staleness_bound) {
return GetFuture(request_state.issued);
}
auto future = GetFuture(request_state.queued);
if (options.batch.deferred() && request_state.queued_request_is_deferred) {
using BatchE = AsyncCacheBatchEntry<EntryOrNode>;
auto& promise = request_state.queued;
Batch::Impl::From(options.batch)
->GetEntry<BatchE>(&internal_future::FutureAccess::rep(promise), [&] {
return std::make_unique<BatchE>(
GetOwningCache(entry).BatchNestingDepth(), entry_or_node,
promise);
});
} else {
request_state.queued_request_is_deferred = false;
}
MaybeIssueRead(entry_or_node, std::move(lock), options.batch);
return future;
}
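// RAII helper that, on destruction, resolves a queued read promise whose
// staleness bound was already satisfied by the read that just completed.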
class QueuedReadHandler {
public:
explicit QueuedReadHandler(AsyncCache::ReadRequestState& request_state,
absl::Time time) {
if (!request_state.queued.null() && time >= request_state.queued_time) {
queued_ = std::move(request_state.queued);
request_state.queued_time = absl::InfinitePast();
request_state.queued_request_is_deferred = true;
}
}
~QueuedReadHandler() {
if (!queued_.null()) {
queued_.SetResult(tensorstore::MakeResult());
}
}
private:
Promise<void> queued_;
};
template <typename EntryOrNode>
void ResolveIssuedRead(EntryOrNode& entry_or_node, absl::Status status,
UniqueWriterLock<Entry> lock) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
auto& request_state = entry_or_node.read_request_state_;
auto issued = std::move(request_state.issued);
auto time = GetEffectiveReadRequestState(entry_or_node).read_state.stamp.time;
assert(!issued.null());
assert(!status.ok() || time >= request_state.issued_time);
{
QueuedReadHandler queued_read_handler(request_state, time);
MaybeIssueRead(entry_or_node, std::move(lock), {});
issued.SetResult(tensorstore::MakeResult(status));
}
ReleaseReadRequestReference(entry_or_node);
}
size_t GetReadStateSize(Entry& entry, const void* read_data) {
if (!read_data) return 0;
return entry.ComputeReadDataSizeInBytes(read_data);
}
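// Records a successful read on `entry_or_node`: updates the read state (its
// size is computed before taking the entry lock) and resolves the issued
// read request.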
template <typename EntryOrNode>
void EntryOrNodeReadSuccess(EntryOrNode& entry_or_node,
ReadState&& read_state) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
Entry& entry = GetOwningEntry(entry_or_node);
const size_t read_state_size = GetReadStateSize(entry, read_state.data.get());
UniqueWriterLock lock(entry);
assert(read_state.stamp.time != absl::InfinitePast());
assert(!StorageGeneration::IsUnknown(read_state.stamp.generation));
SetReadState(entry_or_node, std::move(read_state), read_state_size);
ResolveIssuedRead(entry_or_node, absl::OkStatus(), std::move(lock));
}
template <typename EntryOrNode>
void EntryOrNodeReadError(EntryOrNode& entry_or_node, absl::Status error) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
assert(!error.ok());
ResolveIssuedRead(entry_or_node, std::move(error),
UniqueWriterLock{GetOwningEntry(entry_or_node)});
}
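// Detaches `node` from its entry's transaction map, if it is still linked.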
void RemoveTransactionFromMap(TransactionNode& node) {
if (TransactionTree::IsDisconnected(node)) {
return;
}
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< node << "RemoveTransactionFromMap";
GetOwningEntry(node).transactions_.Remove(node);
}
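// Completes an issued writeback for `node`: removes it from the pending
// writeback queue, advances `committing_transaction_node_`, and starts any
// queued read or the next writeback.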
void ResolveIssuedWriteback(AsyncCache::TransactionNode& node,
UniqueWriterLock<Entry> lock) {
auto& entry = GetOwningEntry(node);
assert(node.prepare_for_commit_state_.load(std::memory_order_relaxed) ==
PrepareForCommitState::kReadyForCommitCalled);
assert(entry.committing_transaction_node_ &&
entry.committing_transaction_node_->transaction() ==
node.transaction());
assert(entry.read_request_state_.issued.null());
if (entry.committing_transaction_node_ != &node) {
intrusive_linked_list::Remove(PendingWritebackQueueAccessor{}, &node);
} else {
    auto* next_node = PendingWritebackQueueAccessor::GetNext(&node);
    if (next_node != &node) {
      intrusive_linked_list::Remove(PendingWritebackQueueAccessor{}, &node);
      // The next queued node becomes the committing node whether or not it
      // belongs to the same transaction.
      entry.committing_transaction_node_ = next_node;
    } else {
      entry.committing_transaction_node_ = nullptr;
    }
}
RemoveTransactionFromMap(node);
MaybeStartReadOrWriteback(entry, std::move(lock), {});
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << node << "CommitDone";
node.CommitDone();
}
}
const ReadState& AsyncCache::ReadState::Unknown() {
static const absl::NoDestructor<ReadState> read_state;
return *read_state;
}
size_t AsyncCache::Entry::ComputeReadDataSizeInBytes(const void* data) {
return 0;
}
size_t AsyncCache::DoGetFixedSizeInBytes(Cache::Entry* entry) {
return this->Cache::DoGetSizeInBytes(entry);
}
size_t AsyncCache::DoGetSizeInBytes(Cache::Entry* base_entry) {
auto* entry = static_cast<Entry*>(base_entry);
return this->DoGetFixedSizeInBytes(entry) +
entry->read_request_state_.read_state_size;
}
Future<const void> AsyncCache::Entry::Read(AsyncCacheReadRequest request,
bool must_not_be_known_to_be_stale) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Read: staleness_bound=" << request.staleness_bound
<< ", must_not_be_known_to_be_stale=" << must_not_be_known_to_be_stale;
return RequestRead(*this, request, must_not_be_known_to_be_stale);
}
void AsyncCache::Entry::ReadSuccess(ReadState&& read_state) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadSuccess: " << read_state.stamp
<< ", data=" << read_state.data.get();
internal::EntryOrNodeReadSuccess(*this, std::move(read_state));
}
void AsyncCache::Entry::ReadError(absl::Status error) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadError: error=" << error;
internal::EntryOrNodeReadError(*this, std::move(error));
}
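// The node pins its entry for its entire lifetime: the pin taken here is
// released by re-adopting it in ~TransactionNode.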
AsyncCache::TransactionNode::TransactionNode(Entry& entry)
: internal::TransactionState::Node(Cache::PinnedEntry(&entry).release()),
reads_committed_(false),
size_updated_(false) {}
Future<const void> AsyncCache::TransactionNode::Read(
AsyncCacheReadRequest request, bool must_not_be_known_to_be_stale) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Read: staleness_bound=" << request.staleness_bound
<< ", must_not_be_known_to_be_stale=" << must_not_be_known_to_be_stale;
if (reads_committed_ &&
(prepare_for_commit_state_.load(std::memory_order_acquire) !=
PrepareForCommitState::kReadyForCommitCalled)) {
return RequestRead(GetOwningEntry(*this), request,
must_not_be_known_to_be_stale);
}
return RequestRead(*this, request, must_not_be_known_to_be_stale);
}
void AsyncCache::TransactionNode::ReadSuccess(ReadState&& read_state) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadSuccess: " << read_state.stamp
<< ", data=" << read_state.data.get();
internal::EntryOrNodeReadSuccess(*this, std::move(read_state));
}
void AsyncCache::TransactionNode::ReadError(absl::Status error) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadError: error=" << error;
internal::EntryOrNodeReadError(*this, std::move(error));
}
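// Queues this node for writeback.  Writebacks on an entry are serialized per
// transaction: if another transaction is already committing, the node waits
// in the pending writeback queue until its turn.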
void AsyncCache::TransactionNode::PrepareForCommit() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "PrepareForCommit";
intrusive_ptr_increment(this);
auto& entry = GetOwningEntry(*this);
UniqueWriterLock lock(entry);
RemoveTransactionFromMap(*this);
if (entry.committing_transaction_node_) {
intrusive_linked_list::InsertBefore(PendingWritebackQueueAccessor{},
entry.committing_transaction_node_,
this);
if (entry.committing_transaction_node_->transaction() != transaction()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Commit: enqueuing for writeback";
return;
}
assert(entry.committing_transaction_node_->prepare_for_commit_state_.load(
std::memory_order_relaxed) >=
PrepareForCommitState::kPrepareDoneCalled);
} else {
intrusive_linked_list::Initialize(PendingWritebackQueueAccessor{}, this);
}
entry.committing_transaction_node_ = this;
MaybeStartReadOrWriteback(entry, std::move(lock), {});
}
void AsyncCache::TransactionNode::Abort() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "Abort";
auto& entry = GetOwningEntry(*this);
UniqueWriterLock lock(entry);
RemoveTransactionFromMap(*this);
lock.unlock();
AbortDone();
}
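// On writeback success, a known generation installs a fresh read state on the
// entry; an unknown generation leaves the cached state in place but marks it
// stale if it predates the writeback time.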
void AsyncCache::TransactionNode::WritebackSuccess(ReadState&& read_state) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "WritebackSuccess: " << read_state.stamp
<< ", data=" << read_state.data.get();
auto& entry = GetOwningEntry(*this);
const size_t read_state_size = GetReadStateSize(entry, read_state.data.get());
UniqueWriterLock lock{entry};
auto& request_state = entry.read_request_state_;
absl::Time read_state_time = read_state.stamp.time;
if (!StorageGeneration::IsUnknown(read_state.stamp.generation)) {
assert(read_state.stamp.generation != StorageGeneration::Invalid());
assert(read_state_time != absl::InfinitePast());
assert(read_state_time >= request_state.read_state.stamp.time);
SetReadState(entry, std::move(read_state), read_state_size);
} else if (read_state_time > request_state.read_state.stamp.time) {
request_state.known_to_be_stale = true;
}
QueuedReadHandler queued_read_handler(request_state, read_state_time);
ResolveIssuedWriteback(*this, std::move(lock));
}
void AsyncCache::TransactionNode::WritebackError() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "WritebackError";
ResolveIssuedWriteback(*this, UniqueWriterLock{GetOwningEntry(*this)});
}
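// Returns an initialized transaction node for this entry, allocating one if
// needed.  Revoked nodes (and, for explicit transactions, nodes from an
// earlier phase) are discarded and the lookup is retried.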
Result<OpenTransactionNodePtr<AsyncCache::TransactionNode>>
AsyncCache::Entry::GetTransactionNodeImpl(OpenTransactionPtr& transaction) {
constexpr auto EnsureTransactionNodeInitialized =
[](AsyncCache::TransactionNode& node,
OpenTransactionPtr& transaction) -> bool {
auto& entry = GetOwningEntry(node);
bool initialized = false;
absl::call_once(node.initialized_, [&] {
const bool new_implicit_transaction = !transaction;
node.initialized_status_ = node.DoInitialize(transaction);
if (node.initialized_status_.ok()) {
if (new_implicit_transaction) {
node.SetTransaction(GetOrCreateOpenTransaction(transaction));
UniqueWriterLock lock(entry);
entry.transactions_.FindOrInsert(
[&](TransactionNode& existing_node) {
return internal::DoThreeWayComparison(
std::less<>{}, transaction.get(),
existing_node.transaction());
},
[&] { return &node; });
}
assert(node.transaction() == transaction.get());
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< node << "New node, new implicit=" << new_implicit_transaction
<< ", transaction=" << transaction.get();
node.initialized_status_ = node.Register();
} else if (!new_implicit_transaction) {
UniqueWriterLock lock(entry);
RemoveTransactionFromMap(node);
}
initialized = true;
});
return initialized;
};
WeakTransactionNodePtr<TransactionNode> node;
if (!transaction) {
WeakTransactionNodePtr<TransactionNode> stale_node;
while (true) {
node.reset(GetOwningCache(*this).DoAllocateTransactionNode(*this));
[[maybe_unused]] bool initialized =
EnsureTransactionNodeInitialized(*node, transaction);
TENSORSTORE_RETURN_IF_ERROR(node->initialized_status_);
assert(initialized);
if (node->IsRevoked()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *node << "Node is revoked";
std::swap(stale_node, node);
continue;
}
node->transaction()->RequestCommit();
break;
}
} else {
size_t min_phase = transaction->phase();
WeakTransactionNodePtr<TransactionNode> stale_node;
while (true) {
UniqueWriterLock lock(*this);
const auto MakeNode = [&] {
auto* node = GetOwningCache(*this).DoAllocateTransactionNode(*this);
node->SetTransaction(*transaction);
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *node << "Adding transaction to map";
return node;
};
auto* candidate_node =
transactions_
.FindOrInsert(
[transaction = transaction.get()](TransactionNode& node) {
return internal::DoThreeWayComparison(
std::less<>{}, transaction, node.transaction());
},
MakeNode)
.first;
if (candidate_node == stale_node.get()) {
auto* new_node = MakeNode();
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *candidate_node << "Replacing in map";
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *new_node << "Adding to map";
transactions_.Replace(*candidate_node, *new_node);
candidate_node = new_node;
}
node.reset(candidate_node);
lock.unlock();
stale_node.reset();
EnsureTransactionNodeInitialized(*node, transaction);
TENSORSTORE_RETURN_IF_ERROR(node->initialized_status_);
if (node->phase() >= min_phase && !node->IsRevoked()) {
break;
}
stale_node = std::move(node);
}
}
OpenTransactionPtr(node->transaction()).release();
return OpenTransactionNodePtr<TransactionNode>(node.release(),
internal::adopt_object_ref);
}
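// Releases the reference acquired in PrepareForCommit.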
void AsyncCache::TransactionNode::Commit() { intrusive_ptr_decrement(this); }
void AsyncCache::TransactionNode::WriterLock() { mutex_.WriterLock(); }
void AsyncCache::TransactionNode::WriterUnlock() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "unlock";
UniqueWriterLock lock(mutex_, std::adopt_lock);
if (!size_updated_) return;
size_updated_ = false;
const size_t new_size = this->ComputeWriteStateSizeInBytes();
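  // Note: computed with unsigned arithmetic; a size decrease wraps around,
  // which is presumed to cancel correctly when the delta is added back to the
  // running total.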
const size_t change = new_size - std::exchange(write_state_size_, new_size);
if (change == 0) return;
this->UpdateSizeInBytes(change);
}
bool AsyncCache::TransactionNode::try_lock() {
mutex_.WriterLock();
if (!IsRevoked()) return true;
mutex_.WriterUnlock();
return false;
}
size_t AsyncCache::TransactionNode::ComputeWriteStateSizeInBytes() { return 0; }
absl::Status AsyncCache::TransactionNode::DoInitialize(
internal::OpenTransactionPtr& transaction) {
return absl::OkStatus();
}
void AsyncCache::TransactionNode::DoApply(ApplyOptions options,
ApplyReceiver receiver) {
ABSL_UNREACHABLE();
}
void AsyncCache::TransactionNode::Revoke() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "Revoke";
revoked_.store(true, std::memory_order_release);
}
void AsyncCache::TransactionNode::InvalidateReadState() {
assert(this->transaction()->commit_started());
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "InvalidateReadState";
this->read_request_state_.read_state = ReadState{};
}
AsyncCache::TransactionNode::~TransactionNode() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "~TransactionNode";
Cache::PinnedEntry(static_cast<Cache::Entry*>(associated_data()),
adopt_object_ref);
}
#ifdef TENSORSTORE_ASYNC_CACHE_DEBUG
AsyncCache::Entry::~Entry() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "~Entry";
}
#endif
}
} | #include "tensorstore/internal/cache/async_cache.h"
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Future;
using ::tensorstore::no_transaction;
using ::tensorstore::Transaction;
using ::tensorstore::UniqueWriterLock;
using ::tensorstore::internal::AsyncCache;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::OpenTransactionPtr;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::TransactionState;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal::WeakTransactionNodePtr;
using ::tensorstore::internal_testing::TestConcurrent;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
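// Records the read and writeback requests issued by TestCache so that tests
// can observe them and complete each one explicitly.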
struct RequestLog {
struct ReadRequest {
AsyncCache::Entry* entry;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
entry->ReadSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) { entry->ReadError(std::move(error)); }
};
struct TransactionReadRequest {
AsyncCache::TransactionNode* node;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
node->ReadSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) { node->ReadError(std::move(error)); }
};
struct WritebackRequest {
AsyncCache::TransactionNode* node;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
node->WritebackSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) {
node->SetError(error);
node->WritebackError();
}
};
tensorstore::internal::ConcurrentQueue<ReadRequest> reads;
tensorstore::internal::ConcurrentQueue<TransactionReadRequest>
transaction_reads;
tensorstore::internal::ConcurrentQueue<WritebackRequest> writebacks;
void HandleWritebacks() {
while (auto req = writebacks.pop_nonblock()) {
req->Success();
}
}
};
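// AsyncCache subclass that routes all reads and writebacks through a
// RequestLog instead of performing real I/O.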
class TestCache : public tensorstore::internal::AsyncCache {
using Base = tensorstore::internal::AsyncCache;
public:
using ReadData = size_t;
class Entry : public AsyncCache::Entry {
public:
using OwningCache = TestCache;
auto CreateWriteTransaction(OpenTransactionPtr transaction = {}) {
return GetTransactionNode(*this, transaction).value();
}
Future<const void> CreateWriteTransactionFuture(
OpenTransactionPtr transaction = {}) {
return CreateWriteTransaction(std::move(transaction))
->transaction()
->future();
}
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).log_->reads.push(RequestLog::ReadRequest{this});
}
size_t ComputeReadDataSizeInBytes(const void* data) override {
return *static_cast<const size_t*>(data);
}
absl::Status do_initialize_transaction_error;
bool share_implicit_transaction_nodes = true;
};
class TransactionNode : public Base::TransactionNode {
public:
using OwningCache = TestCache;
using Base::TransactionNode::TransactionNode;
absl::Status DoInitialize(OpenTransactionPtr& transaction) override {
TENSORSTORE_RETURN_IF_ERROR(
this->Base::TransactionNode::DoInitialize(transaction));
auto& entry = GetOwningEntry(*this);
++value;
SetReadsCommitted();
return entry.do_initialize_transaction_error;
}
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).log_->transaction_reads.push(
RequestLog::TransactionReadRequest{this});
}
void Commit() override {
GetOwningCache(*this).log_->writebacks.push(
RequestLog::WritebackRequest{this});
Base::TransactionNode::Commit();
}
size_t ComputeWriteStateSizeInBytes() override { return size; }
int value = 0;
size_t size = 0;
};
TestCache(RequestLog* log) : log_(log) {}
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
private:
RequestLog* log_;
};
TEST(AsyncCacheTest, ReadBasic) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
absl::Time read_time1, read_time2;
{
auto init_time = absl::Now();
auto read_future = entry->Read({init_time});
ASSERT_FALSE(read_future.ready());
{
auto read_future2 = entry->Read({init_time});
EXPECT_TRUE(HaveSameSharedState(read_future, read_future2));
}
ASSERT_EQ(1u, log.reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time1 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(absl::InfinitePast(),
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time1);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
{
auto read_future3 = entry->Read({read_time1});
ASSERT_TRUE(read_future3.ready());
TENSORSTORE_EXPECT_OK(read_future3);
ASSERT_TRUE(log.reads.empty());
ASSERT_TRUE(log.writebacks.empty());
}
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1u, log.reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time2 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time1,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = UniqueNow();
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
EXPECT_FALSE(HaveSameSharedState(read_future, read_future1));
{
auto read_future2 = entry->Read({absl::InfiniteFuture()});
EXPECT_TRUE(HaveSameSharedState(read_future1, read_future2));
}
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time2,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto read_time2 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
auto read_future1 = entry->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = absl::Now();
{
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
{
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
auto read_future1 = entry->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
TEST(AsyncCacheTest, ReadFailed) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
const auto read_status = absl::UnknownError("read failed");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Error(read_status);
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
EXPECT_EQ(read_status, read_future.status());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
TEST(AsyncCacheTest, ReadFailedAfterSuccessfulRead) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
const auto read_status = absl::UnknownError("read failed");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Error(read_status);
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
EXPECT_EQ(read_status, read_future.status());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
TEST(AsyncCacheTest, NonTransactionalWrite) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
Future<const void> write_future;
{
auto node = entry->CreateWriteTransaction();
weak_node.reset(node.get());
write_future = node->transaction()->future();
}
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, NonTransactionalWriteback) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto write_future = entry->CreateWriteTransactionFuture();
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
auto write_time = absl::Now();
{
auto write_req = log.writebacks.pop();
write_req.Success(write_time);
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_future = entry->Read({write_time});
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
EXPECT_FALSE(read_future.ready());
auto read_req = log.reads.pop();
read_req.Success();
EXPECT_TRUE(read_future.ready());
}
}
TEST(AsyncCacheTest, WritebackRequestedWithReadIssued) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto write_future = entry->CreateWriteTransactionFuture();
write_future.Force();
ASSERT_FALSE(write_future.ready());
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_FALSE(write_future.ready());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_ASSERT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, WritebackRequestedByCache) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto write_future = entry->CreateWriteTransactionFuture();
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, TransactionalReadBasic) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node.reset(node.get());
}
absl::Time read_time1, read_time2;
auto commit_future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
{
auto init_time = absl::Now();
auto read_future = weak_node->Read({init_time});
ASSERT_FALSE(read_future.ready());
{
auto read_future2 = weak_node->Read({init_time});
EXPECT_TRUE(HaveSameSharedState(read_future, read_future2));
}
ASSERT_EQ(1u, log.transaction_reads.size());
read_time1 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(absl::InfinitePast(),
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time1);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
{
auto read_future3 = weak_node->Read({read_time1});
ASSERT_TRUE(read_future3.ready());
TENSORSTORE_EXPECT_OK(read_future3);
ASSERT_TRUE(log.transaction_reads.empty());
ASSERT_TRUE(log.writebacks.empty());
}
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1u, log.transaction_reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time2 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time1,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = UniqueNow();
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
EXPECT_FALSE(HaveSameSharedState(read_future, read_future1));
{
auto read_future2 = weak_node->Read({absl::InfiniteFuture()});
EXPECT_TRUE(HaveSameSharedState(read_future1, read_future2));
}
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time2,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto read_time2 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = absl::Now();
{
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
{
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
write_req.Success();
ASSERT_TRUE(commit_future.ready());
TENSORSTORE_EXPECT_OK(commit_future);
}
TEST(AsyncCacheTest, TransactionalWritebackSuccess) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node.reset(node.get());
}
auto future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Success();
}
ASSERT_TRUE(future.ready());
TENSORSTORE_EXPECT_OK(future);
}
TEST(AsyncCacheTest, TransactionalWritebackError) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
weak_node.reset(entry->CreateWriteTransaction(open_transaction).get());
}
auto future = transaction.CommitAsync();
auto error = absl::UnknownError("write error");
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Error(error);
}
ASSERT_TRUE(future.ready());
EXPECT_EQ(error, future.status());
}
TEST(AsyncCacheTest, ConcurrentTransactionCommit) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
static constexpr size_t kNumEntries = 2;
tensorstore::internal::PinnedCacheEntry<TestCache> entries[kNumEntries];
for (size_t i = 0; i < kNumEntries; ++i) {
entries[i] = GetCacheEntry(cache, tensorstore::StrCat(i));
}
static constexpr size_t kNumTransactions = 3;
std::vector<Transaction> transactions(kNumTransactions, no_transaction);
TestConcurrent<kNumTransactions>(
100,
[&] {
for (size_t i = 0; i < kNumTransactions; ++i) {
auto& transaction = transactions[i];
transaction = Transaction(tensorstore::atomic_isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
transaction));
for (size_t j = 0; j < kNumEntries; ++j) {
entries[(i + j) % kNumEntries]->CreateWriteTransaction(
open_transaction);
}
ASSERT_FALSE(transaction.future().ready());
}
},
[&] {
TransactionState* expected_transactions[kNumTransactions];
for (size_t i = 0; i < kNumTransactions; ++i) {
auto& transaction = transactions[i];
ASSERT_TRUE(transaction.commit_started());
ASSERT_FALSE(transaction.future().ready());
expected_transactions[i] = TransactionState::get(transaction);
}
TransactionState* transaction_order[kNumTransactions];
for (size_t i = 0; i < kNumTransactions; ++i) {
PinnedCacheEntry<TestCache> entry_order[kNumEntries];
ASSERT_EQ(kNumEntries, log.writebacks.size());
for (size_t j = 0; j < kNumEntries; ++j) {
auto write_req = log.writebacks.pop();
entry_order[j].reset(static_cast<TestCache::Entry*>(
&GetOwningEntry(*write_req.node)));
if (j == 0) {
transaction_order[i] = write_req.node->transaction();
} else {
ASSERT_EQ(transaction_order[i], write_req.node->transaction());
}
write_req.Success();
}
EXPECT_THAT(entry_order,
::testing::UnorderedElementsAreArray(entries));
}
EXPECT_THAT(transaction_order, ::testing::UnorderedElementsAreArray(
expected_transactions));
for (auto& transaction : transactions) {
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_ASSERT_OK(transaction.future());
transaction = no_transaction;
}
},
[&](size_t i) { transactions[i].CommitAsync().IgnoreFuture(); });
}
TEST(AsyncCacheTest, DoInitializeTransactionError) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
entry->do_initialize_transaction_error = absl::UnknownError("initialize");
{
OpenTransactionPtr transaction;
EXPECT_THAT(
GetTransactionNode(*entry, transaction).status(),
tensorstore::MatchesStatus(absl::StatusCode::kUnknown, "initialize.*"));
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
Transaction(tensorstore::isolated)));
EXPECT_THAT(
GetTransactionNode(*entry, transaction).status(),
tensorstore::MatchesStatus(absl::StatusCode::kUnknown, "initialize.*"));
}
}
TEST(AsyncCacheTest, ConcurrentInitializeExplicitTransaction) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
OpenTransactionPtr open_transaction;
TestConcurrent<2>(
100,
[&] {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
Transaction(tensorstore::isolated)));
},
[] {},
[&](size_t i) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto node, GetTransactionNode(*entry, open_transaction));
EXPECT_EQ(1, node->value);
});
}
TEST(AsyncCacheTest, ConcurrentInitializeImplicitTransaction) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
TestConcurrent<2>(
100,
[] {},
[&] { log.HandleWritebacks(); },
[&](size_t i) {
OpenTransactionPtr transaction;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto node, GetTransactionNode(*entry, transaction));
EXPECT_EQ(1, node->value);
});
}
TEST(AsyncCacheTest, ShareImplicitTransactionNodesFalse) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto node = entry->CreateWriteTransaction();
auto node2 = entry->CreateWriteTransaction();
EXPECT_NE(node, node2);
node = {};
node2 = {};
log.HandleWritebacks();
}
TEST(AsyncCacheTest, ReadSizeInBytes) {
auto pool = CachePool::Make(CachePool::Limits{20000});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
{
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::Now()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(19000));
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::Pointee(19000));
auto read_future = entry->Read({absl::InfiniteFuture()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(21000));
ASSERT_TRUE(read_future.ready());
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::IsNull());
auto read_future = entry->Read({absl::InfiniteFuture()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(1000));
ASSERT_TRUE(read_future.ready());
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::Pointee(1000));
auto write_future = entry->CreateWriteTransactionFuture();
write_future.Force();
log.writebacks.pop().Success(absl::Now(), std::make_shared<size_t>(21000));
ASSERT_TRUE(write_future.ready());
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::IsNull());
}
}
TEST(AsyncCacheTest, ExplicitTransactionSize) {
auto pool = CachePool::Make(CachePool::Limits{20000});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
{
auto entry_b = GetCacheEntry(cache, "b");
auto read_future = entry_b->Read({absl::Now()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(1000));
}
auto transaction = Transaction(tensorstore::isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
{
auto entry_a = GetCacheEntry(cache, "a");
{
auto node = entry_a->CreateWriteTransaction(open_transaction);
UniqueWriterLock lock(*node);
node->size = 100000;
node->MarkSizeUpdated();
}
EXPECT_EQ(100000, transaction.total_bytes());
auto entry_c = GetCacheEntry(cache, "c");
{
auto node = entry_c->CreateWriteTransaction(open_transaction);
UniqueWriterLock lock(*node);
node->size = 500;
node->MarkSizeUpdated();
}
EXPECT_EQ(100500, transaction.total_bytes());
{
auto node = entry_a->CreateWriteTransaction(open_transaction);
UniqueWriterLock lock(*node);
node->size = 110000;
node->MarkSizeUpdated();
}
EXPECT_EQ(110500, transaction.total_bytes());
}
{
auto entry_b = GetCacheEntry(cache, "b");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry_b).data(),
::testing::Pointee(1000));
}
}
void TestRevokedTransactionNode(bool reverse_order) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node1;
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node2;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
{
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node1.reset(node.get());
node->Revoke();
}
{
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node2.reset(node.get());
}
}
auto future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
{
auto write_req1 = log.writebacks.pop();
EXPECT_EQ(weak_node1.get(), write_req1.node);
auto write_req2 = log.writebacks.pop();
EXPECT_EQ(weak_node2.get(), write_req2.node);
if (reverse_order) {
write_req2.Success();
write_req1.Success();
} else {
write_req1.Success();
write_req2.Success();
}
}
ASSERT_TRUE(future.ready());
TENSORSTORE_EXPECT_OK(future);
}
TEST(AsyncCacheTest, RevokedTransactionNodeFifo) {
TestRevokedTransactionNode(false);
}
TEST(AsyncCacheTest, RevokedTransactionNodeLifo) {
TestRevokedTransactionNode(true);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/async_cache.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/async_cache_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b42f1177-a746-48c5-936f-0579562c34b0 | cpp | tensorflow/tensorflow | tfl_tensor_ref | tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.cc | tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include <cstddef>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace ml_adj {
namespace data {
using ::tflite::BuildTfLiteArray;
using ::tflite::TfLiteArrayUniquePtr;
using ::tflite::TfLiteTypeGetSize;
namespace {
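// Maps a TFLite element type to the corresponding ml_adjacent element type;
// unsupported types fall back to i32.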
etype_t TflToLibType(const TfLiteType tfl_type) {
switch (tfl_type) {
case kTfLiteFloat32:
return etype_t::f32;
case kTfLiteInt32:
return etype_t::i32;
case kTfLiteFloat64:
return etype_t::f64;
default:
return etype_t::i32;
}
}
}
TflTensorRef::TflTensorRef(const TfLiteTensor* tfl_tensor)
: DataRef(TflToLibType(tfl_tensor->type)), tfl_tensor_(tfl_tensor) {
dims_.assign(tfl_tensor->dims->data,
tfl_tensor->dims->data + tfl_tensor->dims->size);
}
const void* TflTensorRef::Data() const { return tfl_tensor_->data.data; }
ind_t TflTensorRef::NumElements() const {
return tfl_tensor_->bytes / TfLiteTypeGetSize(tfl_tensor_->type);
}
size_t TflTensorRef::Bytes() const { return tfl_tensor_->bytes; }
MutableTflTensorRef::MutableTflTensorRef(TfLiteTensor* tfl_tensor,
TfLiteContext* tfl_ctx)
: MutableDataRef(TflToLibType(tfl_tensor->type)),
tfl_tensor_(tfl_tensor),
tfl_ctx_(tfl_ctx) {
dims_.assign(tfl_tensor->dims->data,
tfl_tensor->dims->data + tfl_tensor->dims->size);
}
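// Resizes the wrapped tensor through the TfLiteContext resize hook; a failed
// resize is fatal.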
void MutableTflTensorRef::Resize(dims_t&& dims) {
TfLiteArrayUniquePtr<int> arr =
BuildTfLiteArray(std::vector<int>(dims.begin(), dims.end()));
TFLITE_CHECK_EQ(tfl_ctx_->ResizeTensor(tfl_ctx_, tfl_tensor_, arr.release()),
kTfLiteOk);
dims_ = dims;
}
const void* MutableTflTensorRef::Data() const { return tfl_tensor_->data.data; }
ind_t MutableTflTensorRef::NumElements() const {
return tfl_tensor_->bytes / TfLiteTypeGetSize(tfl_tensor_->type);
}
size_t MutableTflTensorRef::Bytes() const { return tfl_tensor_->bytes; }
void* MutableTflTensorRef::Data() { return tfl_tensor_->data.data; }
}
} | #include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace ml_adj {
namespace data {
namespace {
using ::testing::Each;
using ::tflite::BuildTfLiteTensor;
using ::tflite::DimsAre;
using ::tflite::NumElements;
using ::tflite::TensorUniquePtr;
TfLiteStatus SimpleResizeTensor(TfLiteContext*, TfLiteTensor* tensor,
TfLiteIntArray* new_size) {
TFLITE_CHECK(tensor->type == kTfLiteFloat32);
size_t num_bytes = NumElements(new_size) * sizeof(float);
TF_LITE_ENSURE_STATUS(TfLiteTensorRealloc(num_bytes, tensor));
if (tensor->dims != nullptr) {
TfLiteIntArrayFree(tensor->dims);
}
tensor->dims = new_size;
return kTfLiteOk;
}
std::unique_ptr<TfLiteContext> MakeSimpleContext() {
auto ctx = std::make_unique<TfLiteContext>();
ctx->ResizeTensor = SimpleResizeTensor;
return ctx;
}
TEST(ImmutableTensorRefTest, ConstructsAndManifestsTensorData) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 2}, kTfLiteDynamic);
std::fill(tfl_tensor->data.f, tfl_tensor->data.f + 4, 2.0f);
TflTensorRef ref(tfl_tensor.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<const float> data(reinterpret_cast<const float*>(ref.Data()), 4);
EXPECT_THAT(data, Each(2.0f));
}
TEST(MutableTensorRefTest, ConstructsAndManifestsTensorData) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 2}, kTfLiteDynamic);
std::fill(tfl_tensor->data.f, tfl_tensor->data.f + 4, 2.0f);
MutableTflTensorRef ref(tfl_tensor.get(), nullptr);
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<const float> data(reinterpret_cast<const float*>(ref.Data()), 4);
EXPECT_THAT(data, Each(2.0f));
}
TEST(MutableTensorRefTest, TensorRefWritesDataToTensor) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {3, 3}, kTfLiteDynamic);
MutableTflTensorRef ref(tfl_tensor.get(), nullptr);
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{3, 3}));
ASSERT_EQ(ref.Bytes(), 9 * sizeof(float));
absl::Span<float> data(reinterpret_cast<float*>(ref.Data()), 9);
std::fill(data.begin(), data.end(), 3.0f);
EXPECT_THAT(absl::Span<const float>(tfl_tensor->data.f, 9), Each(3.0f));
}
TEST(MutableTensorRefTest, ResizeIncreaseSize) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 3}, kTfLiteDynamic);
std::unique_ptr<TfLiteContext> ctx = MakeSimpleContext();
MutableTflTensorRef ref(tfl_tensor.get(), ctx.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 3}));
ASSERT_EQ(ref.Bytes(), 6 * sizeof(float));
ref.Resize({3, 3});
ASSERT_EQ(ref.Dims(), (dims_t{3, 3}));
ASSERT_EQ(ref.Bytes(), 9 * sizeof(float));
absl::Span<float> ref_data(reinterpret_cast<float*>(ref.Data()), 9);
ASSERT_THAT(tfl_tensor.get(), DimsAre({3, 3}));
ASSERT_EQ(tfl_tensor->bytes, ref.Bytes());
ASSERT_EQ(ref.Data(), tfl_tensor->data.data);
}
TEST(MutableTensorRefTest, ResizeDecreasesSize) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 3}, kTfLiteDynamic);
std::unique_ptr<TfLiteContext> ctx = MakeSimpleContext();
MutableTflTensorRef ref(tfl_tensor.get(), ctx.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 3}));
ASSERT_EQ(ref.Bytes(), 6 * sizeof(float));
ref.Resize({2, 2});
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<float> ref_data(reinterpret_cast<float*>(ref.Data()), 4);
ASSERT_THAT(tfl_tensor.get(), DimsAre({2, 2}));
ASSERT_EQ(tfl_tensor->bytes, ref.Bytes());
ASSERT_EQ(ref.Data(), tfl_tensor->data.data);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c343d7a-ba36-4203-8386-ade34582bb27 | cpp | google/arolla | bound_operators | arolla/qexpr/bound_operators.cc | arolla/qexpr/bound_operators_test.cc | #include "arolla/qexpr/bound_operators.h"
#include <cstdint>
#include <memory>
#include "arolla/memory/frame.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
namespace arolla {
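// Returns an operator that unconditionally requests a relative jump of `jump`
// instructions in the bound-operator sequence.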
std::unique_ptr<BoundOperator> JumpBoundOperator(int64_t jump) {
return MakeBoundOperator([=](EvaluationContext* ctx, FramePtr frame) {
ctx->set_requested_jump(jump);
});
}
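// Returns an operator that requests the jump only when the value in
// `cond_slot` is false.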
std::unique_ptr<BoundOperator> JumpIfNotBoundOperator(
FrameLayout::Slot<bool> cond_slot, int64_t jump) {
return MakeBoundOperator([=](EvaluationContext* ctx, FramePtr frame) {
if (!frame.Get(cond_slot)) {
ctx->set_requested_jump(jump);
}
});
}
} | #include "arolla/qexpr/bound_operators.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::testing::Eq;
template <typename T>
using Slot = FrameLayout::Slot<T>;
absl::StatusOr<std::unique_ptr<BoundOperator>> CreateAddFloatsBoundOp(
absl::Span<const TypedSlot> input_slots,
Slot<OptionalValue<float>> output_slot) {
std::vector<Slot<bool>> input_cond_slots;
std::vector<Slot<float>> input_value_slots;
for (const auto& typed_input_slot : input_slots) {
QTypePtr input_type = typed_input_slot.GetType();
if (IsOptionalQType(input_type)) {
ASSIGN_OR_RETURN(auto input_slot,
typed_input_slot.ToSlot<OptionalValue<float>>());
input_cond_slots.push_back(GetPresenceSubslotFromOptional(input_slot));
input_value_slots.push_back(GetValueSubslotFromOptional(input_slot));
} else {
ASSIGN_OR_RETURN(auto value_slot, typed_input_slot.ToSlot<float>());
input_value_slots.push_back(value_slot);
}
}
Slot<bool> output_presence_slot = output_slot.GetSubslot<0>();
Slot<float> output_value_slot = output_slot.GetSubslot<1>();
auto add_op =
FunctorBoundOperator([input_value_slots, output_value_slot](
EvaluationContext* ctx, FramePtr frame) {
float result = 0.0f;
for (auto input_value_slot : input_value_slots) {
result += frame.Get(input_value_slot);
}
frame.Set(output_value_slot, result);
});
return std::unique_ptr<BoundOperator>(new WhereAllBoundOperator(
input_cond_slots, output_presence_slot, add_op));
}
TEST(BoundOperators, RunBoundOperators) {
FrameLayout::Builder layout_builder;
Slot<int32_t> x_slot = layout_builder.AddSlot<int32_t>();
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
ASSERT_THAT(alloc.frame().Get(x_slot), Eq(0));
auto make_increment_operator = [x_slot](int32_t increment) {
return MakeBoundOperator(
[x_slot, increment](EvaluationContext* ctx, FramePtr frame) {
frame.Set(x_slot, frame.Get(x_slot) + increment);
});
};
std::vector<std::unique_ptr<BoundOperator>> bound_operators;
bound_operators.push_back(make_increment_operator(1));
bound_operators.push_back(make_increment_operator(10));
bound_operators.push_back(make_increment_operator(100));
EvaluationContext ctx;
EXPECT_EQ(RunBoundOperators(bound_operators, &ctx, alloc.frame()), 2);
EXPECT_THAT(alloc.frame().Get(x_slot), Eq(111));
EXPECT_THAT(ctx.status(), IsOk());
}
TEST(BoundOperators, RunBoundOperators_WithError) {
FrameLayout::Builder layout_builder;
Slot<int32_t> x_slot = layout_builder.AddSlot<int32_t>();
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
ASSERT_THAT(alloc.frame().Get(x_slot), Eq(0));
auto make_increment_operator = [x_slot](int32_t increment) {
return MakeBoundOperator(
[x_slot, increment](EvaluationContext* ctx, FramePtr frame) {
frame.Set(x_slot, frame.Get(x_slot) + increment);
});
};
std::vector<std::unique_ptr<BoundOperator>> bound_operators;
bound_operators.push_back(make_increment_operator(1));
bound_operators.push_back(make_increment_operator(10));
bound_operators.push_back(
MakeBoundOperator([](EvaluationContext* ctx, FramePtr frame) {
ctx->set_status(absl::InvalidArgumentError("foo"));
}));
bound_operators.push_back(make_increment_operator(100));
EvaluationContext ctx;
EXPECT_EQ(RunBoundOperators(bound_operators, &ctx, alloc.frame()), 2);
EXPECT_THAT(alloc.frame().Get(x_slot), Eq(11));
EXPECT_THAT(ctx.status(),
StatusIs(absl::StatusCode::kInvalidArgument, "foo"));
}
TEST(BoundOperators, RunBoundOperators_WithJump) {
FrameLayout::Builder layout_builder;
Slot<int32_t> x_slot = layout_builder.AddSlot<int32_t>();
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
ASSERT_THAT(alloc.frame().Get(x_slot), Eq(0));
auto make_increment_operator = [x_slot](int32_t increment) {
return MakeBoundOperator(
[x_slot, increment](EvaluationContext* ctx, FramePtr frame) {
frame.Set(x_slot, frame.Get(x_slot) + increment);
});
};
std::vector<std::unique_ptr<BoundOperator>> bound_operators;
bound_operators.push_back(make_increment_operator(1));
bound_operators.push_back(JumpBoundOperator(1));
bound_operators.push_back(make_increment_operator(10));
bound_operators.push_back(make_increment_operator(100));
EvaluationContext ctx;
EXPECT_EQ(RunBoundOperators(bound_operators, &ctx, alloc.frame()), 3);
EXPECT_THAT(alloc.frame().Get(x_slot), Eq(101));
EXPECT_THAT(ctx.status(), IsOk());
}
TEST(BoundOperators, WhereAll) {
FrameLayout::Builder layout_builder;
auto input1 = layout_builder.AddSlot<OptionalValue<float>>();
auto input2 = layout_builder.AddSlot<OptionalValue<float>>();
auto input3 = layout_builder.AddSlot<float>();
auto input4 = layout_builder.AddSlot<float>();
auto result = layout_builder.AddSlot<OptionalValue<float>>();
ASSERT_OK_AND_ASSIGN(
auto op1, CreateAddFloatsBoundOp(
ToTypedSlots(input1, input2, input3, input4), result));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext root_ctx(&layout);
root_ctx.Set(input1, 1.0f);
root_ctx.Set(input2, 10.0f);
root_ctx.Set(input3, 100.0f);
root_ctx.Set(input4, 1000.0f);
EvaluationContext ctx(root_ctx);
op1->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
EXPECT_EQ(root_ctx.Get(result), OptionalValue<float>{1111.0f});
root_ctx.Set(input2, std::nullopt);
root_ctx.Set(result, 0.0f);
op1->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
EXPECT_EQ(root_ctx.Get(result), OptionalValue<float>{});
EXPECT_EQ(root_ctx.Get(result).value, 0.0f);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/bound_operators.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/bound_operators_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
43a8a8a6-4fb8-4471-9a04-067387bd6f07 | cpp | tensorflow/tensorflow | cpu_runtime | third_party/xla/xla/service/cpu/cpu_runtime.cc | third_party/xla/xla/service/cpu/cpu_runtime_test.cc | #include "xla/service/cpu/cpu_runtime.h"
#include <cstdarg>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/layout_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "xla/service/cpu/in_process_collectives.h"
#include "xla/service/cpu/xfeed_manager.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace cpu {
namespace runtime {
XfeedManager* GetXfeedManager(int device_ordinal) {
static auto* managers = new absl::flat_hash_map<int, XfeedManager*>();
static absl::Mutex* mutex = new absl::Mutex();
absl::MutexLock lock(mutex);
auto it = managers->find(device_ordinal);
if (it == managers->end()) {
it = managers->emplace(device_ordinal, new XfeedManager()).first;
}
return it->second;
}
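// Device ordinal resolution: an explicit ordinal in the run options wins,
// otherwise the ordinal of the stream's executor is used; absent run options
// the default is device 0.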
int GetDeviceOrdinal(const xla::ExecutableRunOptions* run_options) {
if (!run_options) {
return 0;
} else if (run_options->device_ordinal() != -1) {
return run_options->device_ordinal();
}
return run_options->stream()->parent()->device_ordinal();
}
extern const char* const kEigenMatMulF16SymbolName =
"__xla_cpu_runtime_EigenMatMulF16";
extern const char* const kEigenMatMulF32SymbolName =
"__xla_cpu_runtime_EigenMatMulF32";
extern const char* const kEigenMatMulF64SymbolName =
"__xla_cpu_runtime_EigenMatMulF64";
extern const char* const kEigenMatMulC64SymbolName =
"__xla_cpu_runtime_EigenMatMulC64";
extern const char* const kEigenMatMulC128SymbolName =
"__xla_cpu_runtime_EigenMatMulC128";
extern const char* const kEigenMatMulS32SymbolName =
"__xla_cpu_runtime_EigenMatMulS32";
extern const char* const kEigenBatchMatMulF32SymbolName =
"__xla_cpu_runtime_EigenBatchMatMulF32";
extern const char* const kMKLConv2DF32SymbolName =
"__xla_cpu_runtime_MKLConv2DF32";
extern const char* const kACLConv2DF32SymbolName =
"__xla_cpu_runtime_ACLConv2DF32";
extern const char* const kACLMatMulF32SymbolName =
"__xla_cpu_runtime_ACLMatMulF32";
extern const char* const kACLBatchMatMulF32SymbolName =
"__xla_cpu_runtime_ACLBatchMatMulF32";
extern const char* const kEigenConv2DF16SymbolName =
"__xla_cpu_runtime_EigenConv2DF16";
extern const char* const kEigenConv2DF32SymbolName =
"__xla_cpu_runtime_EigenConv2DF32";
extern const char* const kEigenConv3DF16SymbolName =
"__xla_cpu_runtime_EigenConv3DF16";
extern const char* const kEigenConv3DF32SymbolName =
"__xla_cpu_runtime_EigenConv3DF32";
extern const char* const kDuccFftSymbolName = "__xla_cpu_runtime_DuccFft";
extern const char* const kDuccSingleThreadedFftSymbolName =
"__xla_cpu_runtime_DuccSingleThreadedFft";
extern const char* const kEigenSingleThreadedMatMulF8E4M3FNSymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF8E4M3FN";
extern const char* const kEigenSingleThreadedMatMulF8E5M2SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF8E5M2";
extern const char* const kEigenSingleThreadedMatMulF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF16";
extern const char* const kEigenSingleThreadedMatMulF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF32";
extern const char* const kEigenSingleThreadedMatMulF64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF64";
extern const char* const kEigenSingleThreadedMatMulC64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC64";
extern const char* const kEigenSingleThreadedMatMulC128SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC128";
extern const char* const kEigenSingleThreadedMatMulS32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulS32";
extern const char* const kEigenSingleThreadedMatMulU8SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulU8";
extern const char* const kEigenSingleThreadedConv2DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF16";
extern const char* const kEigenSingleThreadedConv2DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF32";
extern const char* const kEigenSingleThreadedConv3DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF16";
extern const char* const kEigenSingleThreadedConv3DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF32";
extern const char* const kAcquireInfeedBufferForDequeueSymbolName =
"__xla_cpu_runtime_AcquireInfeedBufferForDequeue";
extern const char* const kReleaseInfeedBufferAfterDequeueSymbolName =
"__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue";
extern const char* const kAcquireOutfeedBufferForPopulationSymbolName =
"__xla_cpu_runtime_AcquireOutfeedBufferForPopulation";
extern const char* const kReleaseOutfeedBufferAfterPopulationSymbolName =
"__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation";
extern const char* const kParallelForkJoinSymbolName =
"__xla_cpu_runtime_ParallelForkJoin";
extern const char* const kPrintfToStderrSymbolName =
"__xla_cpu_runtime_PrintfToStderr";
extern const char* const kStatusIsSuccessSymbolName =
"__xla_cpu_runtime_StatusIsSuccess";
extern const char* const kKeyValueSortSymbolName =
"__xla_cpu_runtime_KeyValueSort";
extern const char* const kTopKF32SymbolName = "__xla_cpu_runtime_TopKF32";
extern const char* const kTracingStartSymbolName =
"__xla_cpu_runtime_TracingStart";
extern const char* const kTracingEndSymbolName = "__xla_cpu_runtime_TracingEnd";
extern const char* const kXlaCpuRuntimeSymbolNamePrefix = "__xla_cpu_runtime_";
extern const char* const kAllReduceSymbolName = "__xla_cpu_runtime_AllReduce";
extern const char* const kAllGatherSymbolName = "__xla_cpu_runtime_AllGather";
extern const char* const kReduceScatterSymbolName =
"__xla_cpu_runtime_ReduceScatter";
extern const char* const kAllToAllSymbolName = "__xla_cpu_runtime_AllToAll";
extern const char* const kCollectivePermuteSymbolName =
"__xla_cpu_runtime_CollectivePermute";
extern const char* const kPartitionIdSymbolName =
"__xla_cpu_runtime_PartitionId";
extern const char* const kReplicaIdSymbolName = "__xla_cpu_runtime_ReplicaId";
extern const char* const kOneDnnMatMulSymbolName =
"__xla_cpu_runtime_OneDnnMatMul";
extern const char* const kOneDnnSoftmaxSymbolName =
"__xla_cpu_runtime_OneDnnSoftmax";
extern const char* const kOneDnnLayerNormSymbolName =
"__xla_cpu_runtime_OneDnnLayerNorm";
extern const char* const kOneDnnConvolutionSymbolName =
"__xla_cpu_runtime_OneDnnConvolution";
extern const char* const kOneDnnMatMulReorderSymbolName =
"__xla_cpu_runtime_OneDnnMatMulReorder";
extern const char* const kHandleFfiCallSymbolName =
"__xla_cpu_runtime_HandleFfiCall";
namespace {
absl::StatusOr<Shape> DecodeSelfDescribingShapeConstant(const void* shape_ptr,
int32_t size_bytes) {
ShapeProto shape_proto;
if (!shape_proto.ParseFromArray(shape_ptr, size_bytes)) {
return tsl::errors::Internal("Failed parsing the shape proto");
}
Shape shape(shape_proto);
auto status = ShapeUtil::ValidateShape(shape);
if (!status.ok()) {
return status;
}
return std::move(shape);
}
std::string ShapeString(const void* shape_ptr, int32_t shape_length) {
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
if (shape.ok()) {
return ShapeUtil::HumanStringWithLayout(shape.value());
}
return "<invalid shape>";
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireInfeedBufferForDequeueImpl(const ExecutableRunOptions* run_options,
int32_t buffer_length,
const void* shape,
int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireInfeedBufferForDequeue: "
<< ShapeString(shape, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->infeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program infeed request buffer size " << buffer_length
<< " did not match the runtime's infed buffer length " << buffer->length()
<< "; program reports desired shape: "
<< ShapeString(shape, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseInfeedBufferAfterDequeueImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "ReleaseInfeedBufferAfterDeque: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->infeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireOutfeedBufferForPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireOutfeedBufferForPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->outfeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program outfeed request buffer size " << buffer_length
<< " did not match the runtime's outfeed buffer length "
<< buffer->length() << "; program reports outfed shape: "
<< ShapeString(shape_ptr, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseOutfeedBufferAfterPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "ReleaseOutfeedBufferAfterPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->outfeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReplicaIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
int32_t replica_id = run_options->device_assignment()
->ReplicaIdForDevice(GlobalDeviceId(device_ordinal))
.value();
std::memcpy(output_buffer, &replica_id, 4);
}
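// Like ReplicaIdImpl above, PartitionIdImpl writes a single int32 (4 bytes)
// into the output buffer; here it is the computation id of the device's
// logical id rather than the replica id.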
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void PartitionIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
const DeviceAssignment::LogicalID logical_id =
run_options->device_assignment()
->LogicalIdForDevice(GlobalDeviceId(device_ordinal))
.value();
std::memcpy(output_buffer, &logical_id.computation_id, 4);
}
RendezvousKey GetRendezvousKey(const ExecutableRunOptions* run_options,
GlobalDeviceId device,
std::vector<ReplicaGroup> group,
int32_t channel_id_present,
std::optional<bool> use_global_device_ids,
int64_t op_id) {
const DeviceAssignment& device_assignment = *run_options->device_assignment();
RendezvousKey::CollectiveOpKind op_kind = channel_id_present
? RendezvousKey::kCrossModule
: RendezvousKey::kCrossReplica;
std::vector<GlobalDeviceId> participating_devices =
GetParticipatingDevices(GlobalDeviceId(device), device_assignment, group,
GetCollectiveOpGroupMode(channel_id_present != 0,
use_global_device_ids)
.value())
.value();
int num_local_participants = participating_devices.size();
return RendezvousKey{run_options->run_id(), std::move(participating_devices),
num_local_participants, op_kind, op_id};
}
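// Collectives resolution: prefer the CollectivesInterface supplied via the
// CPU executable run options, falling back to a process-wide in-process
// implementation.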
CollectivesInterface* GetInProcessCollectivesImpl() {
static InProcessCollectives* c = new InProcessCollectives();
return c;
}
CollectivesInterface* GetCollectivesImpl(
const ExecutableRunOptions* run_options) {
if (run_options->cpu_executable_run_options() &&
run_options->cpu_executable_run_options()->collectives()) {
return run_options->cpu_executable_run_options()->collectives();
}
return GetInProcessCollectivesImpl();
}
absl::Duration DefaultCollectiveTimeout() { return absl::Minutes(30); }
absl::StatusOr<int> RankInGlobalDevices(
absl::Span<GlobalDeviceId const> devices, GlobalDeviceId device) {
auto it = absl::c_find(devices, device);
if (it == devices.end()) {
return InvalidArgument(
"Device %d not present in global devices %s.", device.value(),
absl::StrJoin(devices, ", ", [](std::string* out, GlobalDeviceId id) {
absl::StrAppend(out, id.value());
}));
}
return std::distance(devices.begin(), it);
}
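// Each collective entry point below rebuilds the rendezvous key from the
// serialized replica groups, resolves this device's rank and communicator,
// and delegates to the CollectivesInterface; any failure is fatal via
// TF_CHECK_OK or .value().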
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllToAllImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t num_buffers,
int64_t buffer_size, void** source_buffers,
void** destination_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
std::nullopt, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(source_buffers,
sizeof(void*) * num_buffers);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(destination_buffers,
sizeof(void*) * num_buffers);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllToAll(
rendezvous_key, buffer_size,
absl::Span<const void* const>(source_buffers, num_buffers),
absl::Span<void* const>(destination_buffers, num_buffers),
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllGatherImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int32_t use_global_device_ids,
int64_t op_id, const void* replica_groups_str,
int32_t replica_groups_str_size, int64_t buffer_size,
void* source_buffer, void* destination_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllGather(rendezvous_key, buffer_size,
source_buffer, destination_buffer,
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReduceScatterImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type,
int64_t chunk_elems, void* input_buffer,
void* output_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->ReduceScatter(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
static_cast<PrimitiveType>(element_type), chunk_elems, input_buffer,
output_buffer, DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllReduceImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr,
int32_t shape_length, int32_t num_buffers,
void** input_buffers, void** output_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
auto shape_str = ShapeString(shape_ptr, shape_length);
VLOG(2) << "All-reduce input/output shape : " << shape_str;
Shape shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length).value();
CHECK((num_buffers > 1 && shape.IsTuple()) ||
(num_buffers == 1 && LayoutUtil::IsDenseArray(shape)));
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
for (int i = 0; i < num_buffers; i++) {
Shape subshape = num_buffers == 1 ? shape : shape.tuple_shapes(i);
TF_CHECK_OK(communicator->AllReduce(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
subshape.element_type(), ShapeUtil::ElementsIn(subshape),
input_buffers[i], output_buffers[i], DefaultCollectiveTimeout()));
}
}
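// source_target_pairs is serialized as comma-separated "from=to" entries; a
// device may copy to several targets but must have at most one source, which
// the CHECK below enforces.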
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void CollectivePermuteImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
int32_t byte_size, void* input_buffer,
void* output_buffer, const void* source_target_pairs,
int32_t source_target_pairs_size) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view source_target_pairs_serialized(
static_cast<const char*>(source_target_pairs), source_target_pairs_size);
auto pairs = absl::StrSplit(source_target_pairs_serialized, ',');
const DeviceAssignment::LogicalID logical_id =
run_options->device_assignment()->LogicalIdForDevice(device).value();
int32_t logical_device_id =
channel_id_present ? logical_id.computation_id : logical_id.replica_id;
std::optional<int> source_replica_id;
std::vector<int> copy_to;
for (auto& p : pairs) {
std::vector<std::string> mapping = absl::StrSplit(p, '=');
CHECK_EQ(mapping.size(), 2);
int from = std::stoi(mapping[0]);
int to = std::stoi(mapping[1]);
if (from == logical_device_id) {
copy_to.push_back(to);
}
if (to == logical_device_id) {
CHECK(!source_replica_id.has_value());
source_replica_id = from;
}
}
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, {}, channel_id_present,
std::nullopt, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->CollectivePermute(
rendezvous_key, byte_size, source_replica_id, copy_to, input_buffer,
output_buffer, DefaultCollectiveTimeout()));
}
}
}
}
}
extern "C" {
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int __xla_cpu_runtime_PrintfToStderr(
const char* format, ...) {
VLOG(3) << "__xla_cpu_runtime_PrintfToStderr " << format;
va_list args;
va_start(args, format);
int result = vfprintf(stderr, format, args);
va_end(args);
return result;
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int64_t __xla_cpu_runtime_TracingStart(
const void*, const char* name,
const char* hlo_module, int64_t program_id) {
VLOG(3) << "TracingStart " << name;
auto trace_in =
tsl::profiler::TraceMeEncode(name, {{"hlo_op", name},
{"hlo_module", hlo_module},
{"program_id", program_id}});
return tsl::profiler::TraceMe::ActivityStart(trace_in);
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_TracingEnd(
const void*, int64_t id) {
VLOG(3) << "TracingEnd " << id;
tsl::profiler::TraceMe::ActivityEnd(id);
}
void* __xla_cpu_runtime_AcquireInfeedBufferForDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape, int32_t shape_length) {
return xla::cpu::runtime::AcquireInfeedBufferForDequeueImpl(
run_options, buffer_length, shape, shape_length);
}
void __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
return xla::cpu::runtime::ReleaseInfeedBufferAfterDequeueImpl(
run_options, buffer_length, buffer_ptr, shape_ptr, shape_length);
}
void* __xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length) {
return xla::cpu::runtime::AcquireOutfeedBufferForPopulationImpl(
run_options, buffer_length, shape_ptr, shape_length);
}
void __xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
return xla::cpu::runtime::ReleaseOutfeedBufferAfterPopulationImpl(
run_options, buffer_length, buffer_ptr, shape_ptr, shape_length);
}
void __xla_cpu_runtime_AllToAll(const xla::ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t num_buffers, int64_t buffer_size,
void** source_buffers,
void** destination_buffers) {
return xla::cpu::runtime::AllToAllImpl(
run_options, channel_id_present, op_id, replica_groups_str,
replica_groups_str_size, num_buffers, buffer_size, source_buffers,
destination_buffers);
}
void __xla_cpu_runtime_AllGather(const xla::ExecutableRunOptions* run_options,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int64_t buffer_size, void* source_buffer,
void* destination_buffer) {
return xla::cpu::runtime::AllGatherImpl(
run_options, channel_id_present, use_global_device_ids, op_id,
replica_groups_str, replica_groups_str_size, buffer_size, source_buffer,
destination_buffer);
}
void __xla_cpu_runtime_ReduceScatter(
const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str, int32_t replica_groups_str_size,
int32_t channel_id_present, int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type, int64_t chunk_elems,
void* input_buffer, void* output_buffer) {
return xla::cpu::runtime::ReduceScatterImpl(
run_options, replica_groups_str, replica_groups_str_size,
channel_id_present, use_global_device_ids, op_id, reduction_kind,
element_type, chunk_elems, input_buffer, output_buffer);
}
void __xla_cpu_runtime_AllReduce(const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr,
int32_t shape_length, int32_t num_buffers,
void** input_buffers, void** output_buffers) {
return xla::cpu::runtime::AllReduceImpl(
run_options, replica_groups_str, replica_groups_str_size,
channel_id_present, use_global_device_ids, op_id, reduction_kind,
shape_ptr, shape_length, num_buffers, input_buffers, output_buffers);
}
void __xla_cpu_runtime_ReplicaId(const xla::ExecutableRunOptions* run_options,
void* output_buffer) {
return xla::cpu::runtime::ReplicaIdImpl(run_options, output_buffer);
}
void __xla_cpu_runtime_PartitionId(const xla::ExecutableRunOptions* run_options,
void* output_buffer) {
return xla::cpu::runtime::PartitionIdImpl(run_options, output_buffer);
}
void __xla_cpu_runtime_CollectivePermute(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, int32_t byte_size, void* input_buffer, void* output_buffer,
const void* source_target_pairs, int32_t source_target_pairs_size) {
return xla::cpu::runtime::CollectivePermuteImpl(
run_options, channel_id_present, op_id, byte_size, input_buffer,
output_buffer, source_target_pairs, source_target_pairs_size);
}
} | #define EIGEN_USE_THREADS
#include "xla/service/cpu/cpu_runtime.h"
#include <memory>
#include <string>
#include <tuple>
#include "absl/strings/str_format.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/array2d.h"
#include "xla/client/local_client.h"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/runtime_custom_call_status.h"
#include "xla/service/cpu/runtime_matmul.h"
#include "xla/service/cpu/runtime_matmul_acl.h"
#include "xla/service/cpu/runtime_single_threaded_matmul.h"
#include "xla/service/custom_call_status_internal.h"
#include "xla/types.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class CpuRuntimeTest : public ::testing::Test {};
template <typename T>
std::unique_ptr<Array2D<float>> MaybeTransposeArray2D(const Array2D<T>& array,
bool transpose) {
int64_t output_height = array.height();
int64_t output_width = array.width();
if (transpose) {
std::swap(output_width, output_height);
}
auto output = std::make_unique<Array2D<float>>(output_height, output_width);
for (int y = 0; y < array.height(); y++) {
for (int x = 0; x < array.width(); x++) {
if (transpose) {
(*output)(x, y) = array(y, x);
} else {
(*output)(y, x) = array(y, x);
}
}
}
return output;
}
void CheckMatrixMultiply(const Array2D<float>& a, const Array2D<float>& b,
const Array2D<float>& c) {
for (int i = 0; i < a.height(); ++i) {
for (int j = 0; j < b.width(); ++j) {
float sum = 0.0;
for (int k = 0; k < a.width(); ++k) {
sum += a(i, k) * b(k, j);
}
EXPECT_NEAR(sum, c(i, j), 0.01);
}
}
}
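// Runs the runtime matmul entry points. The pre- and post-transposes adapt
// the row-major Array2D storage to the column-major layout the runtime
// kernels appear to assume (inferred from the final transpose of the result
// below, not stated in this file).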
std::unique_ptr<Array2D<float>> EigenMatrixMultiply(const Array2D<float>& a,
const Array2D<float>& b,
bool transpose_lhs,
bool transpose_rhs,
bool single_threaded) {
CHECK_EQ(a.width(), b.height());
int64_t m = a.height();
int64_t n = b.width();
int64_t k = a.width();
auto a_transpose = MaybeTransposeArray2D(a, !transpose_lhs);
auto b_transpose = MaybeTransposeArray2D(b, !transpose_rhs);
auto c_transpose = std::make_unique<Array2D<float>>(n, m);
if (single_threaded) {
__xla_cpu_runtime_EigenSingleThreadedMatMulF32(
nullptr, c_transpose->data(), a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
} else {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "XLAEigen", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
ExecutableRunOptions run_options;
run_options.set_intra_op_thread_pool(&device);
__xla_cpu_runtime_EigenMatMulF32(&run_options, c_transpose->data(),
a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
}
return MaybeTransposeArray2D(*c_transpose, true);
}
struct MatMulShape {
int64_t m;
int64_t k;
int64_t n;
};
MatMulShape MatMulShapes[] = {
MatMulShape{2, 2, 3}, MatMulShape{256, 512, 1024},
MatMulShape{128, 128, 1}, MatMulShape{1, 128, 128},
MatMulShape{1, 32, 128}, MatMulShape{1, 32, 16},
MatMulShape{32, 16, 1}, MatMulShape{32, 128, 1},
};
using MatMulTestParam = std::tuple<MatMulShape, bool, bool, bool>;
class EigenMatMulTest : public CpuRuntimeTest,
public ::testing::WithParamInterface<MatMulTestParam> {
public:
static std::string Name(
const ::testing::TestParamInfo<MatMulTestParam>& info) {
MatMulShape shape = std::get<0>(info.param);
bool transpose_lhs = std::get<1>(info.param);
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);
return absl::StrFormat("EigenMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
}
};
TEST_P(EigenMatMulTest, DoIt) {
MatMulShape shape = std::get<0>(GetParam());
bool transpose_lhs = std::get<1>(GetParam());
bool transpose_rhs = std::get<2>(GetParam());
bool single_threaded = std::get<3>(GetParam());
auto a = MakeLinspaceArray2D(0.0, 1.0, shape.m, shape.k);
auto b = MakeLinspaceArray2D(-2.0, 2.0, shape.k, shape.n);
auto c = EigenMatrixMultiply(*a, *b, transpose_lhs, transpose_rhs,
single_threaded);
CheckMatrixMultiply(*a, *b, *c);
}
INSTANTIATE_TEST_SUITE_P(EigenMatMulTestInstantiation, EigenMatMulTest,
::testing::Combine(::testing::ValuesIn(MatMulShapes),
::testing::Bool(),
::testing::Bool(),
::testing::Bool()),
EigenMatMulTest::Name);
TEST_F(CpuRuntimeTest, SuccessStatus) {
XlaCustomCallStatus success_status;
ASSERT_TRUE(__xla_cpu_runtime_StatusIsSuccess(&success_status));
}
TEST_F(CpuRuntimeTest, FailureStatus) {
XlaCustomCallStatus success_status;
XlaCustomCallStatusSetFailure(&success_status, "Failed", 6);
ASSERT_FALSE(__xla_cpu_runtime_StatusIsSuccess(&success_status));
}
TEST_F(CpuRuntimeTest, GetDeviceOrdinalWhenRunOptionsEmpty) {
EXPECT_EQ(cpu::runtime::GetDeviceOrdinal(nullptr), 0);
}
TEST_F(CpuRuntimeTest, GetDeviceOrdinalWhenSetInRunOptions) {
ExecutableRunOptions run_options;
ASSERT_EQ(run_options.device_ordinal(), -1);
run_options.set_device_ordinal(3);
EXPECT_EQ(cpu::runtime::GetDeviceOrdinal(&run_options), 3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_runtime.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_runtime_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0aa2f5d2-966e-42b9-a207-44881d727aae | cpp | tensorflow/tensorflow | tensor_flag_utils | tensorflow/core/kernels/tensor_flag_utils.cc | tensorflow/core/kernels/tensor_flag_utils_test.cc | #include "tensorflow/core/kernels/tensor_flag_utils.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace tensor_flag_utils {
Status ValidateSparseMatrixShardingConfig(const Tensor& config) {
if (TensorShapeUtils::IsScalar(config.shape())) {
const float scalar_config = config.template scalar<float>()();
if (0 < scalar_config && scalar_config <= 1.0) {
return absl::OkStatus();
}
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be in range (0, 1] but instead found ",
scalar_config));
}
if (!TensorShapeUtils::IsMatrix(config.shape())) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be either scalar or matrix "
"but instead found tensor of rank ",
config.dims()));
}
if (config.dim_size(1) != 3) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat(
"Expected config matrix to have dim(1) = 3 but instead found ",
config.dim_size(1)));
}
auto config_matrix = config.matrix<float>();
for (int i = 0; i < config.dim_size(0); ++i) {
if (0 > config_matrix(i, 0)) {
return errors::InvalidArgument(
"First column of fraction_rows_per_thread_config "
"should "
"have non-negative values but found ",
config_matrix(i, 0), " in row ", i);
}
if (0 > config_matrix(i, 1)) {
return errors::InvalidArgument(
"Second column of fraction_rows_per_thread_config "
"should "
"have non-negative values but found ",
config_matrix(i, 1), " in row ", i);
}
if (!(0 < config_matrix(i, 2) && config_matrix(i, 2) <= 1)) {
return errors::InvalidArgument(
"Last column of fraction_rows_per_thread_config should "
"have values in the range (0, 1] but found ",
config_matrix(i, 2), " in row ", i);
}
}
return absl::OkStatus();
}
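// Scans config rows in order and returns the third column of the first row
// whose first two thresholds are both satisfied; the last row acts as the
// unconditional default.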
template <typename MatrixType, typename K>
MatrixType FindConfigValueForKey(
const typename TTypes<MatrixType>::ConstMatrix& config_mat,
const std::pair<K, K>& key) {
const int last_row_index = config_mat.dimension(0) - 1;
for (int i = 0; i < last_row_index; ++i) {
if (key.first >= config_mat(i, 0) && key.second >= config_mat(i, 1)) {
return config_mat(i, 2);
}
}
return config_mat(last_row_index, 2);
}
Status ValidateScalarQuantityShardingConfig(const Tensor& config) {
if (TensorShapeUtils::IsScalar(config.shape())) {
const float scalar_config = config.template scalar<float>()();
if (0 < scalar_config && scalar_config <= 1.0) {
return absl::OkStatus();
}
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be in range (0, 1] but instead found ",
scalar_config));
}
if (!TensorShapeUtils::IsMatrix(config.shape())) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be either scalar or matrix "
"but instead found tensor of rank ",
config.dims()));
}
if (config.dim_size(1) != 2) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat(
"Expected config matrix to have dim(1) = 2 but instead found ",
config.dim_size(1)));
}
auto config_matrix = config.matrix<float>();
for (int i = 0; i < config.dim_size(0); ++i) {
if (0 > config_matrix(i, 0)) {
return errors::InvalidArgument(
"First column of fraction_rows_per_thread_config "
"should "
"have non-negative values but found ",
config_matrix(i, 0), " in row ", i);
}
if (!(0 < config_matrix(i, 1) && config_matrix(i, 1) <= 1)) {
return errors::InvalidArgument(
"Last column of fraction_rows_per_thread_config should "
"have values in the range (0, 1] but found ",
config_matrix(i, 1), " in row ", i);
}
}
return absl::OkStatus();
}
template <typename MatrixType, typename K>
MatrixType FindConfigValueForKey(
const typename TTypes<MatrixType>::ConstMatrix& config_mat, const K key) {
const int last_row_index = config_mat.dimension(0) - 1;
for (int i = 0; i < last_row_index; ++i) {
if (key >= config_mat(i, 0)) {
return config_mat(i, 1);
}
}
return config_mat(last_row_index, 1);
}
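// Maps `value` to the first element of its fixed-width bucket: with
// bucket_size 5 the buckets are [1,5], [6,10], [11,15], so 12 maps to 11
// (see the GetLinearBucket unit tests).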
template <typename Tindices>
Tindices GetLinearBucket(const Tindices value, const Tindices bucket_size) {
const Tindices next_multiple_of_bucket_size =
(value + bucket_size - 1) / bucket_size * bucket_size;
return next_multiple_of_bucket_size - (bucket_size - 1);
}
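// Geometric analogue of GetLinearBucket: bucket boundaries sit at powers of
// bucket_size plus one, e.g. GetPowerBucket(11, 5) == 6 and
// GetPowerBucket(1335, 11) == 1332, matching the unit tests.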
template <typename Tindices>
Tindices GetPowerBucket(const Tindices value, const Tindices bucket_size) {
if (bucket_size == 1) {
return 1;
}
return std::pow(bucket_size, std::floor(std::log(bucket_size * (value - 1)) /
std::log(bucket_size)) -
1) +
1;
}
#define REGISTER_SPARSE_UTIL_FUNCTIONS(TypeIndex) \
template float FindConfigValueForKey<float, TypeIndex>( \
const TTypes<float>::ConstMatrix& config_mat, \
const std::pair<TypeIndex, TypeIndex>& key); \
template float FindConfigValueForKey<float, TypeIndex>( \
const TTypes<float>::ConstMatrix& config_mat, const TypeIndex key); \
template int64 FindConfigValueForKey<int64, TypeIndex>( \
const TTypes<int64_t>::ConstMatrix& config_mat, const TypeIndex key);
REGISTER_SPARSE_UTIL_FUNCTIONS(int32);
REGISTER_SPARSE_UTIL_FUNCTIONS(int64);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint8);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint16);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint32);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint64);
template int32 GetLinearBucket(const int32 value, const int32 bucket_size);
template int64 GetLinearBucket(const int64 value, const int64 bucket_size);
template int32 GetPowerBucket(const int32 value, const int32 bucket_size);
template int64 GetPowerBucket(const int64 value, const int64 bucket_size);
}
} | #include "tensorflow/core/kernels/tensor_flag_utils.h"
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/test.h"
namespace {
using ::int64_t;
using tensorflow::DataType;
using tensorflow::int32;
using tensorflow::Tensor;
using tensorflow::TTypes;
using tensorflow::error::INVALID_ARGUMENT;
using tensorflow::tensor_flag_utils::FindConfigValueForKey;
using tensorflow::tensor_flag_utils::GetLinearBucket;
using tensorflow::tensor_flag_utils::GetPowerBucket;
using tensorflow::tensor_flag_utils::ValidateScalarQuantityShardingConfig;
using tensorflow::tensor_flag_utils::ValidateSparseMatrixShardingConfig;
TEST(SparseUtilsTest, ValidateSparseMatrixShardingConfig) {
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0.7;
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.0;
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {1, 1});
int indx = 0;
for (const float v : {60.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 2});
int indx = 0;
for (const float v : {
60.0,
50.0,
}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 3});
int indx = 0;
for (const float v : {30.0, 20.0, 1.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, 50.0, 0.41, 30.0, 20.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, 40.0, 0.41, 30.0, 20.0, 10.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, 40.0, 0.41, 30.0, 20.0, -0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, -40.0, 0.41, 30.0, 20.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = -0.5;
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0;
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.2;
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
}
TEST(SparseUtilsTest, ValidateScalarQuantityShardingConfig) {
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0.7;
EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.0;
EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.2;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 1});
int indx = 0;
for (const float v : {60.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 2});
int indx = 0;
for (const float v : {
60.0,
50.0,
}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 3});
int indx = 0;
for (const float v : {30.0, 20.0, 1.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {60.0, 0.41, 30.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {60.0, 0.41, 30.0, 10.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {60.0, 0.41, 30.0, -0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {-40.0, 0.41, 20.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = -0.5;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.2;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
}
TEST(SparseUtils, FindConfigValueForKey) {
{
float data[] = {60.0, 50.0, 0.41, 30.0, 20.0, 0.1, 0, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 3, 3);
auto val = FindConfigValueForKey<float, int32>(config_mat, {70, 40});
EXPECT_FLOAT_EQ(0.1, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 50});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 60});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 40});
EXPECT_FLOAT_EQ(0.1, val);
val = FindConfigValueForKey<float, int32>(config_mat, {50, 60});
EXPECT_FLOAT_EQ(0.1, val);
val = FindConfigValueForKey<float, int32>(config_mat, {20, 30});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {30, 10});
EXPECT_FLOAT_EQ(0.7, val);
}
{
float data[] = {0, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 1, 3);
auto val = FindConfigValueForKey<float, int64_t>(config_mat, {70, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 50});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 60});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {50, 60});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {20, 30});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {30, 10});
EXPECT_FLOAT_EQ(0.7, val);
}
{
float data[] = {60.0, 50.0, 0.41, 0, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 2, 3);
auto val = FindConfigValueForKey<float, int32>(config_mat, {70, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 50});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 60});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {50, 60});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {20, 30});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {30, 10});
EXPECT_FLOAT_EQ(0.7, val);
}
{
float data[] = {60.0, 0.41, 50.0, 0.14, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 3, 2);
auto val = FindConfigValueForKey<float, int32>(config_mat, 70);
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, 60);
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, 55);
EXPECT_FLOAT_EQ(0.14, val);
val = FindConfigValueForKey<float, int32>(config_mat, 50);
EXPECT_FLOAT_EQ(0.14, val);
val = FindConfigValueForKey<float, int32>(config_mat, 20);
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, 30);
EXPECT_FLOAT_EQ(0.7, val);
}
}
TEST(SparseUtils, GetLinearBucket) {
EXPECT_EQ(11, GetLinearBucket(11, 5));
EXPECT_EQ(11, GetLinearBucket(12, 5));
EXPECT_EQ(1, GetLinearBucket(int64_t{4}, int64_t{5}));
}
TEST(SparseUtils, GetPowerBucket) {
EXPECT_EQ(6, GetPowerBucket(11, 5));
EXPECT_EQ(6, GetPowerBucket(12, 5));
EXPECT_EQ(1332, GetPowerBucket(1335, 11));
EXPECT_EQ(5, GetPowerBucket(int64_t{5}, int64_t{4}));
EXPECT_EQ(1, GetPowerBucket(int64_t{4}, int64_t{1}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_flag_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_flag_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bdbf84ca-7758-46a6-aae4-144045d7cdef | cpp | google/tsl | google_auth_provider | tsl/platform/cloud/google_auth_provider.cc | tsl/platform/cloud/google_auth_provider_test.cc | #include "tsl/platform/cloud/google_auth_provider.h"
#ifndef _WIN32
#include <pwd.h>
#include <unistd.h>
#else
#include <sys/types.h>
#endif
#include <fstream>
#include <utility>
#include "absl/strings/match.h"
#include "json/json.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/retrying_utils.h"
namespace tsl {
namespace {
constexpr char kGoogleApplicationCredentials[] =
"GOOGLE_APPLICATION_CREDENTIALS";
constexpr char kGoogleAuthTokenForTesting[] = "GOOGLE_AUTH_TOKEN_FOR_TESTING";
constexpr char kCloudSdkConfig[] = "CLOUDSDK_CONFIG";
constexpr char kNoGceCheck[] = "NO_GCE_CHECK";
constexpr char kGCloudConfigFolder[] = ".config/gcloud/";
constexpr char kWellKnownCredentialsFile[] =
"application_default_credentials.json";
constexpr int kExpirationTimeMarginSec = 60;
constexpr char kOAuthV3Url[] = "https:
constexpr char kOAuthV4Url[] = "https:
constexpr char kGceTokenPath[] = "instance/service-accounts/default/token";
constexpr char kOAuthScope[] = "https:
bool IsFile(const string& filename) {
std::ifstream fstream(filename.c_str());
return fstream.good();
}
absl::Status GetEnvironmentVariableFileName(string* filename) {
if (!filename) {
return errors::FailedPrecondition("'filename' cannot be nullptr.");
}
const char* result = std::getenv(kGoogleApplicationCredentials);
if (!result || !IsFile(result)) {
return errors::NotFound(strings::StrCat("$", kGoogleApplicationCredentials,
" is not set or corrupt."));
}
*filename = result;
return absl::OkStatus();
}
absl::Status GetWellKnownFileName(string* filename) {
if (!filename) {
return errors::FailedPrecondition("'filename' cannot be nullptr.");
}
string config_dir;
const char* config_dir_override = std::getenv(kCloudSdkConfig);
if (config_dir_override) {
config_dir = config_dir_override;
} else {
const char* home_dir = std::getenv("HOME");
if (!home_dir) {
return errors::FailedPrecondition("Could not read $HOME.");
}
config_dir = io::JoinPath(home_dir, kGCloudConfigFolder);
}
auto result = io::JoinPath(config_dir, kWellKnownCredentialsFile);
if (!IsFile(result)) {
return errors::NotFound(
"Could not find the credentials file in the standard gcloud location.");
}
*filename = result;
return absl::OkStatus();
}
}
GoogleAuthProvider::GoogleAuthProvider(
std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client)
: GoogleAuthProvider(std::unique_ptr<OAuthClient>(new OAuthClient()),
std::move(compute_engine_metadata_client),
Env::Default()) {}
GoogleAuthProvider::GoogleAuthProvider(
std::unique_ptr<OAuthClient> oauth_client,
std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client,
Env* env)
: oauth_client_(std::move(oauth_client)),
compute_engine_metadata_client_(
std::move(compute_engine_metadata_client)),
env_(env) {}
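// GetToken serves the cached token while it is still more than
// kExpirationTimeMarginSec from expiry, then retries the sources in order:
// the testing override, local credential files, and the GCE metadata server.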
absl::Status GoogleAuthProvider::GetToken(string* t) {
mutex_lock lock(mu_);
const uint64 now_sec = env_->NowSeconds();
if (now_sec + kExpirationTimeMarginSec < expiration_timestamp_sec_) {
*t = current_token_;
return absl::OkStatus();
}
if (GetTokenForTesting().ok()) {
*t = current_token_;
return absl::OkStatus();
}
auto token_from_files_status = GetTokenFromFiles();
if (token_from_files_status.ok()) {
*t = current_token_;
return absl::OkStatus();
}
char* no_gce_check_var = std::getenv(kNoGceCheck);
bool skip_gce_check = no_gce_check_var != nullptr &&
absl::EqualsIgnoreCase(no_gce_check_var, "true");
absl::Status token_from_gce_status;
if (skip_gce_check) {
token_from_gce_status =
absl::Status(absl::StatusCode::kCancelled,
strings::StrCat("GCE check skipped due to presence of $",
kNoGceCheck, " environment variable."));
} else {
token_from_gce_status = GetTokenFromGce();
}
if (token_from_gce_status.ok()) {
*t = current_token_;
return absl::OkStatus();
}
if (skip_gce_check) {
LOG(INFO)
<< "Attempting an empty bearer token since no token was retrieved "
<< "from files, and GCE metadata check was skipped.";
} else {
LOG(WARNING)
<< "All attempts to get a Google authentication bearer token failed, "
<< "returning an empty token. Retrieving token from files failed with "
"\""
<< token_from_files_status.ToString() << "\"."
<< " Retrieving token from GCE failed with \""
<< token_from_gce_status.ToString() << "\".";
}
*t = "";
if (skip_gce_check) {
expiration_timestamp_sec_ = 0;
} else {
expiration_timestamp_sec_ = UINT64_MAX;
}
current_token_ = "";
return absl::OkStatus();
}
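// Credential files come from $GOOGLE_APPLICATION_CREDENTIALS or the gcloud
// well-known location; a "refresh_token" key means user credentials, while a
// "private_key" key means a service account.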
absl::Status GoogleAuthProvider::GetTokenFromFiles() {
string credentials_filename;
if (!GetEnvironmentVariableFileName(&credentials_filename).ok() &&
!GetWellKnownFileName(&credentials_filename).ok()) {
return errors::NotFound("Could not locate the credentials file.");
}
Json::Value json;
Json::Reader reader;
std::ifstream credentials_fstream(credentials_filename);
if (!reader.parse(credentials_fstream, json)) {
return errors::FailedPrecondition(
"Couldn't parse the JSON credentials file.");
}
if (json.isMember("refresh_token")) {
TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromRefreshTokenJson(
json, kOAuthV3Url, ¤t_token_, &expiration_timestamp_sec_));
} else if (json.isMember("private_key")) {
TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromServiceAccountJson(
json, kOAuthV4Url, kOAuthScope, ¤t_token_,
&expiration_timestamp_sec_));
} else {
return errors::FailedPrecondition(
"Unexpected content of the JSON credentials file.");
}
return absl::OkStatus();
}
absl::Status GoogleAuthProvider::GetTokenFromGce() {
std::vector<char> response_buffer;
const uint64 request_timestamp_sec = env_->NowSeconds();
TF_RETURN_IF_ERROR(compute_engine_metadata_client_->GetMetadata(
kGceTokenPath, &response_buffer));
absl::string_view response =
absl::string_view(&response_buffer[0], response_buffer.size());
TF_RETURN_IF_ERROR(oauth_client_->ParseOAuthResponse(
response, request_timestamp_sec, ¤t_token_,
&expiration_timestamp_sec_));
return absl::OkStatus();
}
absl::Status GoogleAuthProvider::GetTokenForTesting() {
const char* token = std::getenv(kGoogleAuthTokenForTesting);
if (!token) {
return errors::NotFound("The env variable for testing was not set.");
}
expiration_timestamp_sec_ = UINT64_MAX;
current_token_ = token;
return absl::OkStatus();
}
} | #include "tsl/platform/cloud/google_auth_provider.h"
#include <stdlib.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
string TestData() {
return io::JoinPath(testing::TslSrcRoot(), "platform", "cloud", "testdata");
}
class FakeEnv : public EnvWrapper {
public:
FakeEnv() : EnvWrapper(Env::Default()) {}
uint64 NowSeconds() const override { return now; }
uint64 now = 10000;
};
class FakeOAuthClient : public OAuthClient {
public:
absl::Status GetTokenFromServiceAccountJson(
Json::Value json, absl::string_view oauth_server_uri,
absl::string_view scope, string* token,
uint64* expiration_timestamp_sec) override {
provided_credentials_json = json;
*token = return_token;
*expiration_timestamp_sec = return_expiration_timestamp;
return absl::OkStatus();
}
absl::Status GetTokenFromRefreshTokenJson(
Json::Value json, absl::string_view oauth_server_uri, string* token,
uint64* expiration_timestamp_sec) override {
provided_credentials_json = json;
*token = return_token;
*expiration_timestamp_sec = return_expiration_timestamp;
return absl::OkStatus();
}
string return_token;
uint64 return_expiration_timestamp;
Json::Value provided_credentials_json;
};
}
class GoogleAuthProviderTest : public ::testing::Test {
protected:
void SetUp() override { ClearEnvVars(); }
void TearDown() override { ClearEnvVars(); }
void ClearEnvVars() {
unsetenv("CLOUDSDK_CONFIG");
unsetenv("GOOGLE_APPLICATION_CREDENTIALS");
unsetenv("GOOGLE_AUTH_TOKEN_FOR_TESTING");
unsetenv("NO_GCE_CHECK");
}
};
TEST_F(GoogleAuthProviderTest, EnvironmentVariable_Caching) {
setenv("GOOGLE_APPLICATION_CREDENTIALS",
io::JoinPath(TestData(), "service_account_credentials.json").c_str(),
1);
setenv("CLOUDSDK_CONFIG", TestData().c_str(),
1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
oauth_client->return_token = "fake-token";
oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600;
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
EXPECT_EQ("fake_key_id",
oauth_client->provided_credentials_json.get("private_key_id", "")
.asString());
oauth_client->return_token = "new-fake-token";
env.now += 3000;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
env.now += 598;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("new-fake-token", token);
}
TEST_F(GoogleAuthProviderTest, GCloudRefreshToken) {
setenv("CLOUDSDK_CONFIG", TestData().c_str(), 1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
oauth_client->return_token = "fake-token";
oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600;
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
EXPECT_EQ("fake-refresh-token",
oauth_client->provided_credentials_json.get("refresh_token", "")
.asString());
}
TEST_F(GoogleAuthProviderTest, RunningOnGCE) {
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
R"(
{
"access_token":"fake-gce-token",
"expires_in": 3920,
"token_type":"Bearer"
})"),
new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
"", errors::Unavailable("503"), 503),
new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
R"(
{
"access_token":"new-fake-gce-token",
"expires_in": 3920,
"token_type":"Bearer"
})")});
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-gce-token", token);
env.now += 3700;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-gce-token", token);
env.now += 598;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("new-fake-gce-token", token);
}
TEST_F(GoogleAuthProviderTest, OverrideForTesting) {
setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "tokenForTesting", 1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> empty_requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&empty_requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("tokenForTesting", token);
}
TEST_F(GoogleAuthProviderTest, NothingAvailable) {
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
"", errors::NotFound("404"), 404)});
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
}
TEST_F(GoogleAuthProviderTest, NoGceCheckEnvironmentVariable) {
setenv("NO_GCE_CHECK", "True", 1);
auto oauth_client = new FakeOAuthClient;
FakeEnv env;
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
nullptr, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
setenv("NO_GCE_CHECK", "true", 1);
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "newToken", 1);
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("newToken", token);
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/google_auth_provider.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/google_auth_provider_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
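Note: the caching behavior exercised by these tests follows one rule: a cached token is reused until the clock comes within a safety margin of its expiration timestamp, at which point it is refetched. The sketch below isolates that pattern with an injected clock, mirroring how FakeEnv advances time above. The class name, callbacks, and the 60-second margin are assumptions, not part of the library; the tests themselves only bound the margin between 2 and 600 seconds.

#include <cstdint>
#include <functional>
#include <string>
#include <utility>

// Hypothetical sketch of expiry-margin token caching with an injected clock.
class CachedTokenSource {
 public:
  CachedTokenSource(std::function<uint64_t()> now_sec,
                    std::function<void(std::string*, uint64_t*)> fetch)
      : now_sec_(std::move(now_sec)), fetch_(std::move(fetch)) {}

  const std::string& GetToken() {
    constexpr uint64_t kMarginSec = 60;  // assumed refresh margin
    // Refresh only when empty or within the margin of expiry; otherwise the
    // cached token is returned unchanged, as the tests above verify.
    if (token_.empty() || now_sec_() + kMarginSec >= expiration_sec_) {
      fetch_(&token_, &expiration_sec_);
    }
    return token_;
  }

 private:
  std::function<uint64_t()> now_sec_;
  std::function<void(std::string*, uint64_t*)> fetch_;
  std::string token_;
  uint64_t expiration_sec_ = 0;
};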
2d3d62f2-68fa-4364-8207-ec0daf9a2f91 | cpp | tensorflow/tensorflow | add_original_value | third_party/xla/xla/service/add_original_value.cc | third_party/xla/xla/service/add_original_value_test.cc | #include "xla/service/add_original_value.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_original_value.h"
#include "xla/shape_util.h"
namespace xla {
absl::StatusOr<bool> AddOriginalValue::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const auto computation : module->computations()) {
for (const auto instruction : computation->instructions()) {
auto original_value =
std::make_shared<OriginalValue>(instruction->shape());
if (instruction->opcode() == HloOpcode::kGetTupleElement) {
const auto* tuple = instruction->operand(0);
original_value->CopySubtreeFrom(*tuple->original_value(),
{instruction->tuple_index()}, {});
} else if (instruction->opcode() == HloOpcode::kTuple) {
for (int64_t operand_number = 0;
operand_number < instruction->operand_count(); ++operand_number) {
original_value->CopySubtreeFrom(
*instruction->operand(operand_number)->original_value(), {},
{operand_number});
}
} else {
for (auto& leaf : original_value->leaves()) {
leaf.second = {std::string(instruction->name()), leaf.first};
}
}
instruction->set_original_value(original_value);
changed = true;
}
}
return changed;
}
} | #include "xla/service/add_original_value.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using AddOriginalValueTest = HloTestBase;
using ::absl::string_view;
TEST_F(AddOriginalValueTest, Basic) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(s32[]{:T(256)})->u32[2]{0:T(256)}}
ENTRY test {
Arg_0.1 = s32[] parameter(0)
constant.2 = s32[] constant(32)
shift-right-logical.3 = s32[] shift-right-logical(Arg_0.1, constant.2)
convert.4 = u32[] convert(shift-right-logical.3)
reshape.5 = u32[1]{0} reshape(convert.4)
convert.6 = u32[] convert(Arg_0.1)
reshape.7 = u32[1]{0} reshape(convert.6)
ROOT concatenate.8 = u32[2]{0} concatenate(reshape.5, reshape.7), dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AddOriginalValue pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
}
TEST_F(AddOriginalValueTest, Tuple) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->((f32[], f32[3]{0}), f32[2,3]{1,0})}
ENTRY test (v1: f32[], v2: f32[3], v3: f32[2,3]) -> ((f32[], f32[3]{0}), f32[2,3]{1,0}) {
v1 = f32[] parameter(0)
v2 = f32[3]{0} parameter(1)
v3 = f32[2,3]{1,0} parameter(2)
t1 = (f32[], f32[3]{0}) tuple(f32[] v1, f32[3]{0} v2)
ROOT t2 = ((f32[], f32[3]{0}), f32[2,3]{1,0}) tuple((f32[], f32[3]{0}) t1, f32[2,3]{1,0} v3)
}
)";
RunAndFilecheckHloRewrite(hlo_string, AddOriginalValue(), R"(
CHECK: %[[V1:.*]] = f32[] parameter(0), origin={{[{]}}{"[[V1]]"}
CHECK: %[[V2:.*]] = f32[3]{0} parameter(1), origin={{[{]}}{"[[V2]]"}
CHECK: %[[TUPLE:.*]] = (f32[], f32[3]{0}) tuple(%[[V1]], %[[V2]]), origin={({"[[V1]]"}, {"[[V2]]"})}
CHECK: %[[V3:.*]] = f32[2,3]{1,0} parameter(2), origin={{[{]}}{"[[V3]]"}
CHECK: ((f32[], f32[3]{0}), f32[2,3]{1,0}) tuple(%[[TUPLE]], %[[V3]]), origin={(({"v1"}, {"v2"}), {"v3"})}
)");
}
TEST_F(AddOriginalValueTest, GetTupleElement) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->s32[2,3]{1,0}}
ENTRY test {
constant = f32[3]{0} constant({1, 2, 3})
constant.1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
tuple = (f32[3]{0}, s32[2,3]{1,0}) tuple(f32[3]{0} constant, s32[2,3]{1,0} constant.1)
ROOT get-tuple-element = s32[2,3]{1,0} get-tuple-element((f32[3]{0}, s32[2,3]{1,0}) tuple), index=1
}
)";
RunAndFilecheckHloRewrite(hlo_string, AddOriginalValue(), R"(
CHECK: %[[CONSTANT1:.*]] = f32[3]{0} constant({1, 2, 3}), origin={{[{]}}{"[[CONSTANT1]]"}
CHECK: %[[CONSTANT2:.*]] = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } }), origin={{[{]}}{"[[CONSTANT2]]"}
CHECK: %[[TUPLE:.*]] = (f32[3]{0}, s32[2,3]{1,0}) tuple(%[[CONSTANT1]], %[[CONSTANT2]]), origin={({"[[CONSTANT1]]"}, {"[[CONSTANT2]]"})}
CHECK: s32[2,3]{1,0} get-tuple-element(%[[TUPLE]]), index=1, origin={{[{]}}{"[[CONSTANT2]]"}
)");
}
TEST_F(AddOriginalValueTest, GetTupleElementNonSymbolic) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={((f32[], s32[]))->s32[]}
ENTRY test {
p = (f32[], s32[]) parameter(0)
ROOT get-tuple-element = s32[] get-tuple-element(p), index=1
}
)";
RunAndFilecheckHloRewrite(hlo_string, AddOriginalValue(), R"(
CHECK: %[[PARAM:.*]] = (f32[], s32[]) parameter(0), origin={({"p" {0}{{[}]}}, {"p" {1}})}
CHECK: s32[] get-tuple-element(%[[PARAM]]), index=1, origin={{[{]}}{"[[PARAM]]" {1}
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/add_original_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/add_original_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
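Note: the interesting logic in this pass is index bookkeeping over shape trees: a get-tuple-element copies subtree {tuple_index} of its operand's original value to the root, and a tuple copies each operand's root into subtree {operand_number}. The following is a dependency-free sketch of that CopySubtreeFrom semantics over a toy leaf map; the types are stand-ins, not XLA's ShapeTree.

#include <algorithm>
#include <map>
#include <string>
#include <vector>

// Toy stand-in for OriginalValue leaves: shape index -> producer label.
using Leaves = std::map<std::vector<int>, std::string>;

// Copy the subtree rooted at `src` in `from` into a tree rooted at `dst`.
// For a GTE with tuple_index i this is src={i}, dst={}; for a tuple's
// operand i it is src={}, dst={i}.
Leaves CopySubtree(const Leaves& from, const std::vector<int>& src,
                   const std::vector<int>& dst) {
  Leaves out;
  for (const auto& [index, label] : from) {
    if (index.size() < src.size()) continue;
    if (!std::equal(src.begin(), src.end(), index.begin())) continue;
    std::vector<int> rebased = dst;
    rebased.insert(rebased.end(), index.begin() + src.size(), index.end());
    out[rebased] = label;
  }
  return out;
}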
aa187d9f-b591-4c32-9640-9440a3eb802c | cpp | tensorflow/tensorflow | custom_validation_embedder | tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flexbuffers.h"
#include "flatbuffers/vector.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/tools/verifier.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
flatbuffers::Offset<flatbuffers::Vector<uint8_t>> CallOpCustomOptions(
int primary_subgraph_index, int batch_size, FlatBufferBuilder& output) {
flexbuffers::Builder flexbuffer_builder;
flexbuffer_builder.Map([&] {
flexbuffer_builder.Int("subgraph_index", primary_subgraph_index);
flexbuffer_builder.Int("loop_count", batch_size);
});
flexbuffer_builder.Finish();
return output.CreateVector(flexbuffer_builder.GetBuffer());
}
}
void CustomValidationEmbedder::CreateTensorsFrom(
const SubGraph& from_subgraph, const std::vector<int>& from_indexes,
std::vector<std::vector<uint8_t>>* buffer_content,
flatbuffers::FlatBufferBuilder& fbb, std::vector<int>& new_indexes,
std::vector<flatbuffers::Offset<Buffer>>& buffers,
std::vector<flatbuffers::Offset<Tensor>>& tensors) {
int tensor_index_start = tensors.size();
for (int i = 0; i < from_indexes.size(); i++) {
TensorT base_tensor;
from_subgraph.tensors()->Get(from_indexes[i])->UnPackTo(&base_tensor);
if (!base_tensor.shape.empty() && base_tensor.shape[0] == 1) {
base_tensor.shape[0] = batch_size_;
}
if (!base_tensor.shape_signature.empty() &&
base_tensor.shape_signature[0] == 1) {
base_tensor.shape_signature[0] = batch_size_;
}
base_tensor.buffer = buffers.size();
tensors.push_back(CreateTensor(fbb, &base_tensor));
new_indexes.push_back(tensor_index_start + i);
if (buffer_content && !(*buffer_content)[i].empty()) {
buffers.push_back(
CreateBuffer(fbb, fbb.CreateVector((*buffer_content)[i])));
} else {
buffers.push_back(CreateBuffer(fbb));
}
}
}
MinibenchmarkStatus CustomValidationEmbedder::BuildModel(
const Model& main_model, flatbuffers::FlatBufferBuilder& fbb) {
ModelT main_model_obj;
main_model.UnPackTo(&main_model_obj);
if (main_model_obj.subgraphs[0]->inputs.size() != custom_input_.size()) {
TF_LITE_REPORT_ERROR(
error_reporter_,
"Unexpected custom_input size. Expected: %d. Actual: %d.",
main_model_obj.subgraphs[0]->inputs.size(), custom_input_.size());
return kMinibenchmarkValidationSubgraphBuildFailed;
}
std::vector<flatbuffers::Offset<Metadata>> metadata;
metadata.reserve(main_model_obj.metadata.size());
for (auto& iter : main_model_obj.metadata) {
metadata.push_back(CreateMetadata(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<SignatureDef>> signature_defs;
signature_defs.reserve(main_model_obj.signature_defs.size());
for (auto& iter : main_model_obj.signature_defs) {
signature_defs.push_back(CreateSignatureDef(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<SubGraph>> subgraphs;
subgraphs.reserve(main_model_obj.subgraphs.size());
for (auto& iter : main_model_obj.subgraphs) {
subgraphs.push_back(CreateSubGraph(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<Buffer>> buffers;
buffers.reserve(main_model_obj.buffers.size());
for (auto& iter : main_model_obj.buffers) {
buffers.push_back(CreateBuffer(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes;
operator_codes.reserve(main_model_obj.operator_codes.size());
for (auto& iter : main_model_obj.operator_codes) {
operator_codes.push_back(CreateOperatorCode(fbb, iter.get()));
}
operator_codes.push_back(CreateOperatorCode(
fbb, BuiltinOperator_CUSTOM, fbb.CreateString("validation/call")));
int operator_code_index = operator_codes.size() - 1;
std::vector<flatbuffers::Offset<Tensor>> tensors;
std::vector<int32_t> input;
CreateTensorsFrom(*main_model.subgraphs()->Get(0),
main_model_obj.subgraphs[0]->inputs, &custom_input_, fbb,
input, buffers, tensors);
std::vector<int32_t> output;
CreateTensorsFrom(*main_model.subgraphs()->Get(0),
main_model_obj.subgraphs[0]->outputs, nullptr, fbb, output,
buffers, tensors);
auto input_offset = fbb.CreateVector(input);
auto output_offset = fbb.CreateVector(output);
std::vector<flatbuffers::Offset<Operator>> operators{CreateOperator(
fbb, operator_code_index, input_offset, output_offset,
tflite::BuiltinOptions_NONE, 0,
      CallOpCustomOptions(/*primary_subgraph_index=*/0, batch_size_, fbb),
tflite::CustomOptionsFormat_FLEXBUFFERS)};
subgraphs.push_back(
CreateSubGraph(fbb, fbb.CreateVector(tensors), input_offset,
output_offset, fbb.CreateVector(operators),
fbb.CreateString(std::string(kValidationGraphName))));
fbb.Finish(
CreateModel(fbb, kModelSchemaVersion, fbb.CreateVector(operator_codes),
fbb.CreateVector(subgraphs),
fbb.CreateString(main_model_obj.description),
fbb.CreateVector(buffers),
                  /*metadata_buffer=*/0, fbb.CreateVector(metadata),
fbb.CreateVector(signature_defs)),
"TFL3");
if (Verify(fbb.GetBufferPointer(), fbb.GetSize(), error_reporter_)) {
return kMinibenchmarkSuccess;
} else {
return kMinibenchmarkValidationSubgraphBuildFailed;
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
constexpr int kMobileNetModelInputByteSize = 1 * 224 * 224 * 3;
class CustomValidationEmbedderTest : public ::testing::Test {
protected:
void SetUp() override {
std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path.empty());
plain_model_loader_ =
std::make_unique<tools::PathModelLoader>(plain_model_path);
ASSERT_TRUE(plain_model_loader_->Init());
}
std::unique_ptr<tools::ModelLoader> plain_model_loader_;
};
TEST_F(CustomValidationEmbedderTest, BuildValidationModelSucceed) {
int batch_size = 5;
std::vector<uint8_t> input_buffer(batch_size * kMobileNetModelInputByteSize);
CustomValidationEmbedder embedder(batch_size, {input_buffer});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkSuccess);
auto model =
FlatBufferModel::BuildFromModel(GetModel(fbb.GetBufferPointer()));
auto interpreter = std::make_unique<Interpreter>();
auto resolver = std::make_unique<
::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
resolver->AddCustom("validation/call", ops::Register_CALL(), 1);
ASSERT_EQ(InterpreterBuilder(*model, *resolver)(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
Subgraph* validation_graph = interpreter->subgraph(1);
EXPECT_THAT(input_buffer, testing::ElementsAreArray(
GetTensorData<uint8_t>(validation_graph->tensor(
validation_graph->inputs()[0])),
input_buffer.size()));
EXPECT_EQ(validation_graph->AllocateTensors(), kTfLiteOk);
EXPECT_EQ(validation_graph->Invoke(), kTfLiteOk);
}
TEST_F(CustomValidationEmbedderTest, BuildValidationModelTooManyInput) {
int batch_size = 5;
CustomValidationEmbedder embedder(batch_size, {{}, {}});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkValidationSubgraphBuildFailed);
}
TEST_F(CustomValidationEmbedderTest, BuildValidationModelInvalidBufferSize) {
CustomValidationEmbedder embedder(2, {std::vector<uint8_t>(2, 2)});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkValidationSubgraphBuildFailed);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
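Note: CallOpCustomOptions above encodes the call op's two parameters as a FlexBuffers map. The consuming kernel would read them back roughly as sketched here; the reading side is an assumption for illustration, since only the writer appears in this file, but the flexbuffers calls themselves are the standard API.

#include <cstddef>
#include <cstdint>
#include "flatbuffers/flexbuffers.h"

struct CallOpParams {
  int subgraph_index;
  int loop_count;
};

// Decode the map written by CallOpCustomOptions.
CallOpParams ParseCallOpOptions(const uint8_t* data, size_t size) {
  auto map = flexbuffers::GetRoot(data, size).AsMap();
  CallOpParams params;
  params.subgraph_index = map["subgraph_index"].AsInt32();
  params.loop_count = map["loop_count"].AsInt32();
  return params;
}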
b9327b78-625c-4f69-a147-65f529cff80c | cpp | google/tensorstore | span_json | tensorstore/util/span_json.h | tensorstore/util/span_json_test.cc | #ifndef TENSORSTORE_UTIL_SPAN_JSON_H_
#define TENSORSTORE_UTIL_SPAN_JSON_H_
#include <cstddef>
#include <nlohmann/json.hpp>
#include "tensorstore/util/span.h"
namespace tensorstore {
template <typename T, ptrdiff_t Extent>
void to_json(::nlohmann::json& out,
tensorstore::span<T, Extent> s) {
out = ::nlohmann::json::array_t(s.begin(), s.end());
}
}
#endif | #include "tensorstore/util/span_json.h"
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::span;
TEST(SpanJsonTest, Basic) {
EXPECT_EQ(::nlohmann::json({1, 2, 3}),
::nlohmann::json(span<const int, 3>({1, 2, 3})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/span_json.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/span_json_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
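Note: nlohmann::json locates to_json via argument-dependent lookup, which is why the single free overload above is enough for spans of any serializable element type. The same hook works for user types; a minimal sketch with a hypothetical Point type (not from this library):

#include <nlohmann/json.hpp>

namespace myapp {
struct Point {
  double x, y;
};

// Found by ADL when constructing ::nlohmann::json from a Point.
void to_json(::nlohmann::json& out, const Point& p) {
  out = ::nlohmann::json{{"x", p.x}, {"y", p.y}};
}
}  // namespace myapp

// Usage: ::nlohmann::json j = myapp::Point{1.0, 2.0};  // {"x":1.0,"y":2.0}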
9be7b229-09c8-4138-96c7-ad097a47e3ee | cpp | google/cel-cpp | type_conversion_functions | runtime/standard/type_conversion_functions.cc | runtime/standard/type_conversion_functions_test.cc | #include "runtime/standard/type_conversion_functions.h"
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/overflow.h"
#include "internal/status_macros.h"
#include "internal/time.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel {
namespace {
using ::cel::internal::EncodeDurationToJson;
using ::cel::internal::EncodeTimestampToJson;
using ::cel::internal::MaxTimestamp;
const absl::Time kMaxTime = MaxTimestamp();
absl::Status RegisterBoolConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions&) {
return UnaryFunctionAdapter<bool, bool>::RegisterGlobalOverload(
cel::builtin::kBool, [](ValueManager&, bool v) { return v; }, registry);
}
absl::Status RegisterIntConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions&) {
absl::Status status =
UnaryFunctionAdapter<int64_t, bool>::RegisterGlobalOverload(
cel::builtin::kInt,
[](ValueManager&, bool v) { return static_cast<int64_t>(v); },
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<Value, double>::RegisterGlobalOverload(
cel::builtin::kInt,
[](ValueManager& value_factory, double v) -> Value {
auto conv = cel::internal::CheckedDoubleToInt64(v);
if (!conv.ok()) {
return value_factory.CreateErrorValue(conv.status());
}
return value_factory.CreateIntValue(*conv);
},
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<int64_t, int64_t>::RegisterGlobalOverload(
cel::builtin::kInt, [](ValueManager&, int64_t v) { return v; }, registry);
CEL_RETURN_IF_ERROR(status);
status =
UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
cel::builtin::kInt,
[](ValueManager& value_factory, const StringValue& s) -> Value {
int64_t result;
if (!absl::SimpleAtoi(s.ToString(), &result)) {
return value_factory.CreateErrorValue(
absl::InvalidArgumentError("cannot convert string to int"));
}
return value_factory.CreateIntValue(result);
},
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<int64_t, absl::Time>::RegisterGlobalOverload(
cel::builtin::kInt,
[](ValueManager&, absl::Time t) { return absl::ToUnixSeconds(t); },
registry);
CEL_RETURN_IF_ERROR(status);
return UnaryFunctionAdapter<Value, uint64_t>::RegisterGlobalOverload(
cel::builtin::kInt,
[](ValueManager& value_factory, uint64_t v) -> Value {
auto conv = cel::internal::CheckedUint64ToInt64(v);
if (!conv.ok()) {
return value_factory.CreateErrorValue(conv.status());
}
return value_factory.CreateIntValue(*conv);
},
registry);
}
absl::Status RegisterStringConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
if (!options.enable_string_conversion) {
return absl::OkStatus();
}
absl::Status status =
UnaryFunctionAdapter<Value, const BytesValue&>::RegisterGlobalOverload(
cel::builtin::kString,
[](ValueManager& value_factory, const BytesValue& value) -> Value {
auto handle_or = value_factory.CreateStringValue(value.ToString());
if (!handle_or.ok()) {
return value_factory.CreateErrorValue(handle_or.status());
}
return *handle_or;
},
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<StringValue, double>::RegisterGlobalOverload(
cel::builtin::kString,
[](ValueManager& value_factory, double value) -> StringValue {
return value_factory.CreateUncheckedStringValue(absl::StrCat(value));
},
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<StringValue, int64_t>::RegisterGlobalOverload(
cel::builtin::kString,
[](ValueManager& value_factory, int64_t value) -> StringValue {
return value_factory.CreateUncheckedStringValue(absl::StrCat(value));
},
registry);
CEL_RETURN_IF_ERROR(status);
status =
UnaryFunctionAdapter<StringValue, StringValue>::RegisterGlobalOverload(
cel::builtin::kString,
[](ValueManager&, StringValue value) -> StringValue { return value; },
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<StringValue, uint64_t>::RegisterGlobalOverload(
cel::builtin::kString,
[](ValueManager& value_factory, uint64_t value) -> StringValue {
return value_factory.CreateUncheckedStringValue(absl::StrCat(value));
},
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<Value, absl::Duration>::RegisterGlobalOverload(
cel::builtin::kString,
[](ValueManager& value_factory, absl::Duration value) -> Value {
auto encode = EncodeDurationToJson(value);
if (!encode.ok()) {
return value_factory.CreateErrorValue(encode.status());
}
return value_factory.CreateUncheckedStringValue(*encode);
},
registry);
CEL_RETURN_IF_ERROR(status);
return UnaryFunctionAdapter<Value, absl::Time>::RegisterGlobalOverload(
cel::builtin::kString,
[](ValueManager& value_factory, absl::Time value) -> Value {
auto encode = EncodeTimestampToJson(value);
if (!encode.ok()) {
return value_factory.CreateErrorValue(encode.status());
}
return value_factory.CreateUncheckedStringValue(*encode);
},
registry);
}
absl::Status RegisterUintConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions&) {
absl::Status status =
UnaryFunctionAdapter<Value, double>::RegisterGlobalOverload(
cel::builtin::kUint,
[](ValueManager& value_factory, double v) -> Value {
auto conv = cel::internal::CheckedDoubleToUint64(v);
if (!conv.ok()) {
return value_factory.CreateErrorValue(conv.status());
}
return value_factory.CreateUintValue(*conv);
},
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<Value, int64_t>::RegisterGlobalOverload(
cel::builtin::kUint,
[](ValueManager& value_factory, int64_t v) -> Value {
auto conv = cel::internal::CheckedInt64ToUint64(v);
if (!conv.ok()) {
return value_factory.CreateErrorValue(conv.status());
}
return value_factory.CreateUintValue(*conv);
},
registry);
CEL_RETURN_IF_ERROR(status);
status =
UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
cel::builtin::kUint,
[](ValueManager& value_factory, const StringValue& s) -> Value {
uint64_t result;
if (!absl::SimpleAtoi(s.ToString(), &result)) {
return value_factory.CreateErrorValue(
absl::InvalidArgumentError("doesn't convert to a string"));
}
return value_factory.CreateUintValue(result);
},
registry);
CEL_RETURN_IF_ERROR(status);
return UnaryFunctionAdapter<uint64_t, uint64_t>::RegisterGlobalOverload(
cel::builtin::kUint, [](ValueManager&, uint64_t v) { return v; },
registry);
}
absl::Status RegisterBytesConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions&) {
absl::Status status =
UnaryFunctionAdapter<BytesValue, BytesValue>::RegisterGlobalOverload(
cel::builtin::kBytes,
[](ValueManager&, BytesValue value) -> BytesValue { return value; },
registry);
CEL_RETURN_IF_ERROR(status);
return UnaryFunctionAdapter<absl::StatusOr<BytesValue>, const StringValue&>::
RegisterGlobalOverload(
cel::builtin::kBytes,
[](ValueManager& value_factory, const StringValue& value) {
return value_factory.CreateBytesValue(value.ToString());
},
registry);
}
absl::Status RegisterDoubleConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions&) {
absl::Status status =
UnaryFunctionAdapter<double, double>::RegisterGlobalOverload(
cel::builtin::kDouble, [](ValueManager&, double v) { return v; },
registry);
CEL_RETURN_IF_ERROR(status);
status = UnaryFunctionAdapter<double, int64_t>::RegisterGlobalOverload(
cel::builtin::kDouble,
[](ValueManager&, int64_t v) { return static_cast<double>(v); },
registry);
CEL_RETURN_IF_ERROR(status);
status =
UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
cel::builtin::kDouble,
[](ValueManager& value_factory, const StringValue& s) -> Value {
double result;
if (absl::SimpleAtod(s.ToString(), &result)) {
return value_factory.CreateDoubleValue(result);
} else {
return value_factory.CreateErrorValue(absl::InvalidArgumentError(
"cannot convert string to double"));
}
},
registry);
CEL_RETURN_IF_ERROR(status);
return UnaryFunctionAdapter<double, uint64_t>::RegisterGlobalOverload(
cel::builtin::kDouble,
[](ValueManager&, uint64_t v) { return static_cast<double>(v); },
registry);
}
Value CreateDurationFromString(ValueManager& value_factory,
const StringValue& dur_str) {
absl::Duration d;
if (!absl::ParseDuration(dur_str.ToString(), &d)) {
return value_factory.CreateErrorValue(
absl::InvalidArgumentError("String to Duration conversion failed"));
}
auto duration = value_factory.CreateDurationValue(d);
if (!duration.ok()) {
return value_factory.CreateErrorValue(duration.status());
}
return *duration;
}
absl::Status RegisterTimeConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(
(UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
cel::builtin::kDuration, CreateDurationFromString, registry)));
CEL_RETURN_IF_ERROR(
(UnaryFunctionAdapter<Value, int64_t>::RegisterGlobalOverload(
cel::builtin::kTimestamp,
[](ValueManager& value_factory, int64_t epoch_seconds) -> Value {
return value_factory.CreateUncheckedTimestampValue(
absl::FromUnixSeconds(epoch_seconds));
},
registry)));
CEL_RETURN_IF_ERROR(
(UnaryFunctionAdapter<Value, absl::Time>::RegisterGlobalOverload(
cel::builtin::kTimestamp,
[](ValueManager&, absl::Time value) -> Value {
return TimestampValue(value);
},
registry)));
CEL_RETURN_IF_ERROR(
(UnaryFunctionAdapter<Value, absl::Duration>::RegisterGlobalOverload(
cel::builtin::kDuration,
[](ValueManager&, absl::Duration value) -> Value {
return DurationValue(value);
},
registry)));
bool enable_timestamp_duration_overflow_errors =
options.enable_timestamp_duration_overflow_errors;
return UnaryFunctionAdapter<Value, const StringValue&>::
RegisterGlobalOverload(
cel::builtin::kTimestamp,
[=](ValueManager& value_factory,
const StringValue& time_str) -> Value {
absl::Time ts;
if (!absl::ParseTime(absl::RFC3339_full, time_str.ToString(), &ts,
nullptr)) {
return value_factory.CreateErrorValue(absl::InvalidArgumentError(
"String to Timestamp conversion failed"));
}
if (enable_timestamp_duration_overflow_errors) {
if (ts < absl::UniversalEpoch() || ts > kMaxTime) {
return value_factory.CreateErrorValue(
absl::OutOfRangeError("timestamp overflow"));
}
}
return value_factory.CreateUncheckedTimestampValue(ts);
},
registry);
}
}
absl::Status RegisterTypeConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(RegisterBoolConversionFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterBytesConversionFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterDoubleConversionFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterIntConversionFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterStringConversionFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterUintConversionFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterTimeConversionFunctions(registry, options));
absl::Status status =
UnaryFunctionAdapter<Value, const Value&>::RegisterGlobalOverload(
cel::builtin::kDyn,
[](ValueManager&, const Value& value) -> Value { return value; },
registry);
CEL_RETURN_IF_ERROR(status);
return UnaryFunctionAdapter<Value, const Value&>::RegisterGlobalOverload(
cel::builtin::kType,
[](ValueManager& factory, const Value& value) {
return factory.CreateTypeValue(value.GetRuntimeType());
},
registry);
}
} | #include "runtime/standard/type_conversion_functions.h"
#include <vector>
#include "base/builtins.h"
#include "base/function_descriptor.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
MATCHER_P3(MatchesUnaryDescriptor, name, receiver, expected_kind, "") {
const FunctionDescriptor& descriptor = arg.descriptor;
std::vector<Kind> types{expected_kind};
return descriptor.name() == name && descriptor.receiver_style() == receiver &&
descriptor.types() == types;
}
TEST(RegisterTypeConversionFunctions, RegisterBoolConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(registry.FindStaticOverloads(builtin::kBool, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kBool, false, Kind::kBool)));
}
TEST(RegisterTypeConversionFunctions, RegisterIntConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kInt, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kInt, false, Kind::kInt),
MatchesUnaryDescriptor(builtin::kInt, false, Kind::kDouble),
MatchesUnaryDescriptor(builtin::kInt, false, Kind::kUint),
MatchesUnaryDescriptor(builtin::kInt, false, Kind::kBool),
MatchesUnaryDescriptor(builtin::kInt, false, Kind::kString),
MatchesUnaryDescriptor(builtin::kInt, false, Kind::kTimestamp)));
}
TEST(RegisterTypeConversionFunctions, RegisterUintConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kUint, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kUint, false, Kind::kInt),
MatchesUnaryDescriptor(builtin::kUint, false, Kind::kDouble),
MatchesUnaryDescriptor(builtin::kUint, false, Kind::kUint),
MatchesUnaryDescriptor(builtin::kUint, false, Kind::kString)));
}
TEST(RegisterTypeConversionFunctions, RegisterDoubleConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kDouble, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kInt),
MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kDouble),
MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kUint),
MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kString)));
}
TEST(RegisterTypeConversionFunctions, RegisterStringConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_string_conversion = true;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kString, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kString, false, Kind::kInt),
MatchesUnaryDescriptor(builtin::kString, false, Kind::kDouble),
MatchesUnaryDescriptor(builtin::kString, false, Kind::kUint),
MatchesUnaryDescriptor(builtin::kString, false, Kind::kString),
MatchesUnaryDescriptor(builtin::kString, false, Kind::kBytes),
MatchesUnaryDescriptor(builtin::kString, false, Kind::kDuration),
MatchesUnaryDescriptor(builtin::kString, false, Kind::kTimestamp)));
}
TEST(RegisterTypeConversionFunctions,
RegisterStringConversionFunctionsDisabled) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_string_conversion = false;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kString, false, {Kind::kAny}),
IsEmpty());
}
TEST(RegisterTypeConversionFunctions, RegisterBytesConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kBytes, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kBytes, false, Kind::kBytes),
MatchesUnaryDescriptor(builtin::kBytes, false, Kind::kString)));
}
TEST(RegisterTypeConversionFunctions, RegisterTimeConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kTimestamp, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kTimestamp, false, Kind::kInt),
MatchesUnaryDescriptor(builtin::kTimestamp, false, Kind::kString),
MatchesUnaryDescriptor(builtin::kTimestamp, false,
Kind::kTimestamp)));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kDuration, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kDuration, false, Kind::kString),
MatchesUnaryDescriptor(builtin::kDuration, false, Kind::kDuration)));
}
TEST(RegisterTypeConversionFunctions, RegisterMetaTypeConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(registry.FindStaticOverloads(builtin::kDyn, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kDyn, false, Kind::kAny)));
EXPECT_THAT(registry.FindStaticOverloads(builtin::kType, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kType, false, Kind::kAny)));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/type_conversion_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/type_conversion_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
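Note: every overload in this file follows the same registration pattern: UnaryFunctionAdapter<Ret, Arg>::RegisterGlobalOverload with a lambda taking ValueManager& plus the argument, returning either a plain value or an error value. The same mechanism extends to custom functions; here is a sketch registering a hypothetical ord() string-to-int function (the function name and semantics are invented, only the adapter usage mirrors the file):

#include <cstdint>
#include <string>
#include "absl/status/status.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "runtime/function_registry.h"

// Hypothetical: first byte of a string as an int, or an error value if empty.
absl::Status RegisterOrd(cel::FunctionRegistry& registry) {
  return cel::UnaryFunctionAdapter<cel::Value, const cel::StringValue&>::
      RegisterGlobalOverload(
          "ord",
          [](cel::ValueManager& value_factory,
             const cel::StringValue& s) -> cel::Value {
            std::string str = s.ToString();
            if (str.empty()) {
              return value_factory.CreateErrorValue(
                  absl::InvalidArgumentError("ord() of empty string"));
            }
            return value_factory.CreateIntValue(
                static_cast<int64_t>(static_cast<unsigned char>(str[0])));
          },
          registry);
}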
5dee0d93-d903-4902-acfb-be17f27a8659 | cpp | tensorflow/tensorflow | delegate_provider | tensorflow/lite/tools/delegates/delegate_provider.cc | tensorflow/lite/tools/delegates/delegate_provider_test.cc | #include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace tflite {
namespace tools {
TfLiteDelegatePtr CreateNullDelegate() {
return TfLiteDelegatePtr(nullptr, [](TfLiteOpaqueDelegate*) {});
}
void ProvidedDelegateList::AddAllDelegateParams() const {
for (const auto& provider : providers_) {
params_->Merge(provider->DefaultParams());
}
}
void ProvidedDelegateList::AppendCmdlineFlags(std::vector<Flag>& flags) const {
for (const auto& provider : providers_) {
auto delegate_flags = provider->CreateFlags(params_);
flags.insert(flags.end(), delegate_flags.begin(), delegate_flags.end());
}
}
void ProvidedDelegateList::RemoveCmdlineFlag(std::vector<Flag>& flags,
const std::string& name) const {
  for (auto it = flags.begin(); it != flags.end();) {
if (it->GetFlagName() == name) {
it = flags.erase(it);
} else {
++it;
}
}
}
std::vector<ProvidedDelegateList::ProvidedDelegate>
ProvidedDelegateList::CreateAllRankedDelegates(const ToolParams& params) const {
std::vector<ProvidedDelegateList::ProvidedDelegate> delegates;
for (const auto& provider : providers_) {
auto ptr_rank = provider->CreateRankedTfLiteDelegate(params);
if (ptr_rank.first == nullptr) continue;
static bool already_logged = false;
if (!already_logged) {
TFLITE_LOG(INFO) << provider->GetName() << " delegate created.";
#ifndef NDEBUG
provider->LogParams(params, false);
#endif
already_logged = true;
}
ProvidedDelegateList::ProvidedDelegate info;
info.provider = provider.get();
info.delegate = std::move(ptr_rank.first);
info.rank = ptr_rank.second;
delegates.emplace_back(std::move(info));
}
std::sort(delegates.begin(), delegates.end(),
[](const ProvidedDelegateList::ProvidedDelegate& a,
const ProvidedDelegateList::ProvidedDelegate& b) {
return a.rank < b.rank;
});
return delegates;
}
}
} | #include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/test_util.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
TEST(ProvidedDelegateListTest, AddAllDelegateParams) {
ToolParams params;
ProvidedDelegateList providers(¶ms);
providers.AddAllDelegateParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
#if !TFLITE_WITH_STABLE_ABI
EXPECT_TRUE(params.HasParam("use_nnapi"));
#endif
}
TEST(ProvidedDelegateListTest, AppendCmdlineFlags) {
std::vector<Flag> flags;
ToolParams params;
ProvidedDelegateList providers(¶ms);
providers.AddAllDelegateParams();
providers.AppendCmdlineFlags(flags);
EXPECT_FALSE(flags.empty());
}
TEST(KernelTestDelegateProvidersTest, CreateAllRankedDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
ToolParams params;
ProvidedDelegateList providers(¶ms);
providers.AddAllDelegateParams();
#if TFLITE_WITH_STABLE_ABI
ASSERT_EQ(TfLiteInitializeShimsForTest(), 0);
params.Set<bool>("use_xnnpack", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("XNNPACK", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
#else
params.Set<bool>("use_xnnpack", true, 2);
params.Set<bool>("use_dummy_delegate", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(2, delegates.size());
EXPECT_EQ("DummyDelegate", delegates.front().provider->GetName());
EXPECT_EQ(1, delegates.front().rank);
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ("XNNPACK", delegates.back().provider->GetName());
EXPECT_NE(nullptr, delegates.back().delegate.get());
EXPECT_EQ(2, delegates.back().rank);
#endif
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
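Note: typical use of ProvidedDelegateList, as the test exercises, is: register every provider's default params, override a few, then take the returned delegates lowest rank first (CreateAllRankedDelegates sorts ascending, so rank 1 applies before rank 2). A condensed sketch of that flow; the ModifyGraphWithDelegate step is indicated only in a comment since no interpreter is constructed here:

#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"

void ApplyRankedDelegates() {
  tflite::tools::ToolParams params;
  tflite::tools::ProvidedDelegateList providers(&params);
  providers.AddAllDelegateParams();  // pull in each provider's default params
  params.Set<bool>("use_xnnpack", true, /*position=*/1);
  // Delegates come back sorted by ascending rank; apply in that order.
  for (auto& entry : providers.CreateAllRankedDelegates(params)) {
    // e.g. interpreter->ModifyGraphWithDelegate(entry.delegate.get());
    (void)entry;
  }
}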
8c2ba008-edd9-410e-a26c-ecfbda3b69d9 | cpp | tensorflow/tensorflow | attr_util | tensorflow/core/runtime_fallback/kernel/attr_util.cc | tensorflow/core/runtime_fallback/kernel/attr_util_test.cc | #include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include <assert.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include "tensorflow/core/util/padding.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/kernel_utils.h"
namespace tensorflow {
DataType ParseTFDataType(StringPiece dtype) {
if (dtype == "DT_INT8") {
return DataType::DT_INT8;
} else if (dtype == "DT_INT32") {
return DataType::DT_INT32;
} else if (dtype == "DT_INT64") {
return DataType::DT_INT64;
} else if (dtype == "DT_FLOAT") {
return DataType::DT_FLOAT;
} else if (dtype == "DT_DOUBLE") {
return DataType::DT_DOUBLE;
} else {
assert(false && "Unsupported dtype");
abort();
}
}
bool ParseBoolAttrValue(StringPiece attr_value) {
if (attr_value == "false") {
return false;
} else if (attr_value == "true") {
return true;
} else {
assert(false && "Bool attribute value invalid");
abort();
}
}
Status ParseValue(StringPiece input, bool* value) {
*value = ParseBoolAttrValue(input);
return absl::OkStatus();
}
Status ParseValue(StringPiece input, int32* value) {
bool parse_result = absl::SimpleAtoi(input, value);
if (!parse_result) {
return errors::InvalidArgument("Could not parse int32 from ", input);
}
return absl::OkStatus();
}
Status ParseValue(StringPiece input, DataType* value) {
*value = ParseTFDataType(input);
return absl::OkStatus();
}
Status ParseValue(StringPiece input, std::string* value) {
*value = std::string(input);
return absl::OkStatus();
}
Status ParseValue(StringPiece input, std::vector<int32>* value) {
std::vector<std::string> parts = str_util::Split(input, ",");
value->reserve(parts.size());
for (const auto& value_str : parts) {
int32_t value_int;
bool parse_result = absl::SimpleAtoi(value_str, &value_int);
if (!parse_result) {
return errors::InvalidArgument("Could not parse list of integers from ",
input);
}
value->push_back(value_int);
}
return absl::OkStatus();
}
Status ParseValue(StringPiece input, Padding* value) {
return GetPaddingFromString(input, value);
}
Status AddOpAttr(const std::string& name, const std::string& attr_value,
tfrt::OpAttrs* opattrs) {
Status s;
std::vector<absl::string_view> value_split = tfd::AttrValueSplit(attr_value);
auto& type = value_split[0];
auto& value = value_split[1];
if (type == "bool") {
bool val;
s = ParseValue(value, &val);
opattrs->Set<bool>(name, val);
} else if (type == "i32") {
int32_t val;
s = ParseValue(value, &val);
opattrs->Set<int32>(name, val);
} else if (type == "string" || type == "padding") {
std::string val;
s = ParseValue(value, &val);
opattrs->SetString(name, val);
} else if (type == "tfdtype") {
DataType val;
s = ParseValue(value, &val);
opattrs->Set<tfrt::OpAttrType>(name, tfd::ConvertFromTfDataType(val));
} else if (type == "list(i32)") {
std::vector<int32> val;
s = ParseValue(value, &val);
opattrs->SetArray<int32>(name, val);
}
return s;
}
Status FillOpAttrs(tfrt::RemainingAttributes attrs, tfrt::OpAttrs* opattrs) {
int num_tf_attrs = attrs.size() / 2;
Status status;
for (int i = 0; i < num_tf_attrs; ++i) {
std::string name = attrs.GetStringAttribute(i * 2).str();
std::string attr_value = attrs.GetStringAttribute(i * 2 + 1).str();
Status s = AddOpAttr(name, attr_value, opattrs);
status.Update(s);
}
return status;
}
} | #include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/support/forward_decls.h"
using llvm::ArrayRef;
using tfrt::OpAttrs;
using tfrt::OpAttrType;
namespace tensorflow {
namespace {
TEST(AttrUtilTest, TestGetBoolAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "bool$true", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "bool$false", &opattrs));
ASSERT_TRUE(opattrs.GetAsserting<bool>("foo"));
ASSERT_FALSE(opattrs.GetAsserting<bool>("bar"));
}
TEST(AttrUtilTest, TestGetIntAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "i32$-2", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "i32$0", &opattrs));
TF_ASSERT_OK(AddOpAttr("baz", "i32$123", &opattrs));
ASSERT_EQ(opattrs.GetAsserting<int32>("foo"), -2);
ASSERT_EQ(opattrs.GetAsserting<int32>("bar"), 0);
ASSERT_EQ(opattrs.GetAsserting<int32>("baz"), 123);
Status s = AddOpAttr("invalid", "i32$4.5", &opattrs);
ASSERT_FALSE(s.ok());
}
TEST(AttrUtilTest, TestGetDTypeAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "tfdtype$DT_INT32", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "tfdtype$DT_FLOAT", &opattrs));
ASSERT_EQ(opattrs.GetAsserting<OpAttrType>("foo"), OpAttrType::I32);
ASSERT_EQ(opattrs.GetAsserting<OpAttrType>("bar"), OpAttrType::F32);
}
TEST(AttrUtilTest, TestGetIntListAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "list(i32)$", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "list(i32)$1", &opattrs));
TF_ASSERT_OK(AddOpAttr("baz", "list(i32)$1,2,3", &opattrs));
ArrayRef<int32> v1, v2, v3;
std::vector<int32> expected_v1;
std::vector<int32> expected_v2 = {1};
std::vector<int32> expected_v3 = {1, 2, 3};
ArrayRef<int32> expected_v1_ref(expected_v1);
ArrayRef<int32> expected_v2_ref(expected_v2);
ArrayRef<int32> expected_v3_ref(expected_v3);
ASSERT_TRUE(opattrs.GetArray<int32>("foo", &v1));
ASSERT_TRUE(opattrs.GetArray<int32>("bar", &v2));
ASSERT_TRUE(opattrs.GetArray<int32>("baz", &v3));
ASSERT_EQ(v1, expected_v1_ref);
ASSERT_EQ(v2, expected_v2_ref);
ASSERT_EQ(v3, expected_v3_ref);
}
TEST(AttrUtilTest, TestGetStrAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "string$", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "string$test", &opattrs));
ASSERT_EQ(opattrs.GetStringAsserting("foo"), "");
ASSERT_EQ(opattrs.GetStringAsserting("bar"), "test");
}
TEST(AttrUtilTest, TestGetPaddingAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "padding$VALID", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "padding$SAME", &opattrs));
ASSERT_EQ(opattrs.GetStringAsserting("foo"), "VALID");
ASSERT_EQ(opattrs.GetStringAsserting("bar"), "SAME");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/attr_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/attr_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
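Note: AddOpAttr consumes attributes in a "type$value" wire format, e.g. "i32$123", "padding$SAME", "list(i32)$1,2,3", split by tfd::AttrValueSplit into a type prefix and a value suffix. A dependency-free sketch of that split, assuming (as the format implies) that '$' never appears in the type prefix:

#include <string_view>
#include <utility>

// Split "i32$123" into {"i32", "123"}; everything after the first '$' is the
// value, so the value itself may contain '$'.
std::pair<std::string_view, std::string_view> SplitAttrValue(
    std::string_view encoded) {
  size_t pos = encoded.find('$');
  if (pos == std::string_view::npos) return {encoded, {}};
  return {encoded.substr(0, pos), encoded.substr(pos + 1)};
}
// SplitAttrValue("list(i32)$1,2,3") -> {"list(i32)", "1,2,3"}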
1e029fa7-a92b-4f6b-89f9-6779c761db00 | cpp | google/cel-cpp | minimal_descriptor_pool | internal/minimal_descriptor_pool.cc | internal/minimal_descriptor_pool_test.cc | #include "internal/minimal_descriptor_pool.h"
#include <cstdint>
#include "google/protobuf/descriptor.pb.h"
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "google/protobuf/descriptor.h"
namespace cel::internal {
namespace {
ABSL_CONST_INIT const uint8_t kMinimalDescriptorSet[] = {
#include "internal/minimal_descriptor_set_embed.inc"
};
}
absl::Nonnull<const google::protobuf::DescriptorPool*> GetMinimalDescriptorPool() {
static absl::Nonnull<const google::protobuf::DescriptorPool* const> pool = []() {
google::protobuf::FileDescriptorSet file_desc_set;
ABSL_CHECK(file_desc_set.ParseFromArray(
kMinimalDescriptorSet, ABSL_ARRAYSIZE(kMinimalDescriptorSet)));
auto* pool = new google::protobuf::DescriptorPool();
for (const auto& file_desc : file_desc_set.file()) {
ABSL_CHECK(pool->BuildFile(file_desc) != nullptr);
}
return pool;
}();
return pool;
}
} | #include "internal/minimal_descriptor_pool.h"
#include "internal/testing.h"
#include "google/protobuf/descriptor.h"
namespace cel::internal {
namespace {
using ::testing::NotNull;
TEST(MinimalDescriptorPool, NullValue) {
ASSERT_THAT(GetMinimalDescriptorPool()->FindEnumTypeByName(
"google.protobuf.NullValue"),
NotNull());
}
TEST(MinimalDescriptorPool, BoolValue) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.BoolValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_BOOLVALUE);
}
TEST(MinimalDescriptorPool, Int32Value) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Int32Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_INT32VALUE);
}
TEST(MinimalDescriptorPool, Int64Value) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Int64Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_INT64VALUE);
}
TEST(MinimalDescriptorPool, UInt32Value) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.UInt32Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_UINT32VALUE);
}
TEST(MinimalDescriptorPool, UInt64Value) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.UInt64Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_UINT64VALUE);
}
TEST(MinimalDescriptorPool, FloatValue) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.FloatValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_FLOATVALUE);
}
TEST(MinimalDescriptorPool, DoubleValue) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.DoubleValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_DOUBLEVALUE);
}
TEST(MinimalDescriptorPool, BytesValue) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.BytesValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_BYTESVALUE);
}
TEST(MinimalDescriptorPool, StringValue) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.StringValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_STRINGVALUE);
}
TEST(MinimalDescriptorPool, Any) {
const auto* desc =
GetMinimalDescriptorPool()->FindMessageTypeByName("google.protobuf.Any");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(), google::protobuf::Descriptor::WELLKNOWNTYPE_ANY);
}
TEST(MinimalDescriptorPool, Duration) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Duration");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_DURATION);
}
TEST(MinimalDescriptorPool, Timestamp) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Timestamp");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_TIMESTAMP);
}
TEST(MinimalDescriptorPool, Value) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Value");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(), google::protobuf::Descriptor::WELLKNOWNTYPE_VALUE);
}
TEST(MinimalDescriptorPool, ListValue) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.ListValue");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(),
google::protobuf::Descriptor::WELLKNOWNTYPE_LISTVALUE);
}
TEST(MinimalDescriptorPool, Struct) {
const auto* desc = GetMinimalDescriptorPool()->FindMessageTypeByName(
"google.protobuf.Struct");
ASSERT_THAT(desc, NotNull());
EXPECT_EQ(desc->well_known_type(), google::protobuf::Descriptor::WELLKNOWNTYPE_STRUCT);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/minimal_descriptor_pool.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/minimal_descriptor_pool_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
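Note: a descriptor pool like this is typically paired with a DynamicMessageFactory to instantiate the well-known types without linking their generated classes. The sketch below uses standard protobuf APIs around GetMinimalDescriptorPool(); the wrapper function itself is hypothetical, not part of this library.

#include <memory>
#include "google/protobuf/dynamic_message.h"
#include "internal/minimal_descriptor_pool.h"

std::unique_ptr<google::protobuf::Message> NewBoolValue() {
  const google::protobuf::DescriptorPool* pool =
      cel::internal::GetMinimalDescriptorPool();
  const google::protobuf::Descriptor* desc =
      pool->FindMessageTypeByName("google.protobuf.BoolValue");
  if (desc == nullptr) return nullptr;
  // The factory must outlive the messages it creates; a function-local
  // static keeps this sketch self-contained.
  static google::protobuf::DynamicMessageFactory factory;
  return std::unique_ptr<google::protobuf::Message>(
      factory.GetPrototype(desc)->New());
}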
e87940a3-3087-4512-85f9-cd3fc3a7d4f1 | cpp | abseil/abseil-cpp | type_traits | absl/meta/type_traits.h | absl/meta/type_traits_test.cc | #ifndef ABSL_META_TYPE_TRAITS_H_
#define ABSL_META_TYPE_TRAITS_H_
#include <cstddef>
#include <functional>
#include <string>
#include <type_traits>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#ifdef __cpp_lib_span
#include <span>
#endif
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT __STDCPP_DEFAULT_NEW_ALIGNMENT__
#else
#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT alignof(std::max_align_t)
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace type_traits_internal {
template <typename... Ts>
struct VoidTImpl {
using type = void;
};
template <class Enabler, template <class...> class Op, class... Args>
struct is_detected_impl {
using type = std::false_type;
};
template <template <class...> class Op, class... Args>
struct is_detected_impl<typename VoidTImpl<Op<Args...>>::type, Op, Args...> {
using type = std::true_type;
};
template <template <class...> class Op, class... Args>
struct is_detected : is_detected_impl<void, Op, Args...>::type {};
template <class Enabler, class To, template <class...> class Op, class... Args>
struct is_detected_convertible_impl {
using type = std::false_type;
};
template <class To, template <class...> class Op, class... Args>
struct is_detected_convertible_impl<
typename std::enable_if<std::is_convertible<Op<Args...>, To>::value>::type,
To, Op, Args...> {
using type = std::true_type;
};
template <class To, template <class...> class Op, class... Args>
struct is_detected_convertible
: is_detected_convertible_impl<void, To, Op, Args...>::type {};
}
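// Illustrative usage of the detection idiom above (an editorial example, not
// part of absl's public API; assumes <string> and <utility> are included):
//
//   template <class T>
//   using size_member_t = decltype(std::declval<const T&>().size());
//
//   static_assert(
//       type_traits_internal::is_detected<size_member_t, std::string>::value,
//       "std::string has a size() member");
//   static_assert(
//       !type_traits_internal::is_detected<size_member_t, int>::value,
//       "int does not");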
template <typename... Ts>
using void_t = typename type_traits_internal::VoidTImpl<Ts...>::type;
template <typename... Ts>
struct conjunction : std::true_type {};
template <typename T, typename... Ts>
struct conjunction<T, Ts...>
: std::conditional<T::value, conjunction<Ts...>, T>::type {};
template <typename T>
struct conjunction<T> : T {};
template <typename... Ts>
struct disjunction : std::false_type {};
template <typename T, typename... Ts>
struct disjunction<T, Ts...>
: std::conditional<T::value, T, disjunction<Ts...>>::type {};
template <typename T>
struct disjunction<T> : T {};
template <typename T>
struct negation : std::integral_constant<bool, !T::value> {};
template <typename T>
struct is_function
: std::integral_constant<
bool, !(std::is_reference<T>::value ||
std::is_const<typename std::add_const<T>::type>::value)> {};
using std::is_copy_assignable;
using std::is_move_assignable;
using std::is_trivially_copy_assignable;
using std::is_trivially_copy_constructible;
using std::is_trivially_default_constructible;
using std::is_trivially_destructible;
using std::is_trivially_move_assignable;
using std::is_trivially_move_constructible;
#if defined(__cpp_lib_remove_cvref) && __cpp_lib_remove_cvref >= 201711L
template <typename T>
using remove_cvref = std::remove_cvref<T>;
template <typename T>
using remove_cvref_t = typename std::remove_cvref<T>::type;
#else
template <typename T>
struct remove_cvref {
using type =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
};
template <typename T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
template <typename T>
using remove_cv_t = typename std::remove_cv<T>::type;
template <typename T>
using remove_const_t = typename std::remove_const<T>::type;
template <typename T>
using remove_volatile_t = typename std::remove_volatile<T>::type;
template <typename T>
using add_cv_t = typename std::add_cv<T>::type;
template <typename T>
using add_const_t = typename std::add_const<T>::type;
template <typename T>
using add_volatile_t = typename std::add_volatile<T>::type;
template <typename T>
using remove_reference_t = typename std::remove_reference<T>::type;
template <typename T>
using add_lvalue_reference_t = typename std::add_lvalue_reference<T>::type;
template <typename T>
using add_rvalue_reference_t = typename std::add_rvalue_reference<T>::type;
template <typename T>
using remove_pointer_t = typename std::remove_pointer<T>::type;
template <typename T>
using add_pointer_t = typename std::add_pointer<T>::type;
template <typename T>
using make_signed_t = typename std::make_signed<T>::type;
template <typename T>
using make_unsigned_t = typename std::make_unsigned<T>::type;
template <typename T>
using remove_extent_t = typename std::remove_extent<T>::type;
template <typename T>
using remove_all_extents_t = typename std::remove_all_extents<T>::type;
template <typename T>
using decay_t = typename std::decay<T>::type;
template <bool B, typename T = void>
using enable_if_t = typename std::enable_if<B, T>::type;
template <bool B, typename T, typename F>
using conditional_t = typename std::conditional<B, T, F>::type;
template <typename... T>
using common_type_t = typename std::common_type<T...>::type;
template <typename T>
using underlying_type_t = typename std::underlying_type<T>::type;
namespace type_traits_internal {
#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \
(defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
template <typename>
struct result_of;
template <typename F, typename... Args>
struct result_of<F(Args...)> : std::invoke_result<F, Args...> {};
#else
template <typename F>
using result_of = std::result_of<F>;
#endif
}
template <typename F>
using result_of_t = typename type_traits_internal::result_of<F>::type;
namespace type_traits_internal {
#if defined(_MSC_VER) || (defined(_LIBCPP_VERSION) && \
_LIBCPP_VERSION < 4000 && _LIBCPP_STD_VER > 11)
#define ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ 0
#else
#define ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ 1
#endif
#if !ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
template <typename Key, typename = size_t>
struct IsHashable : std::true_type {};
#else
template <typename Key, typename = void>
struct IsHashable : std::false_type {};
template <typename Key>
struct IsHashable<
Key,
absl::enable_if_t<std::is_convertible<
decltype(std::declval<std::hash<Key>&>()(std::declval<Key const&>())),
std::size_t>::value>> : std::true_type {};
#endif
struct AssertHashEnabledHelper {
private:
static void Sink(...) {}
struct NAT {};
template <class Key>
static auto GetReturnType(int)
-> decltype(std::declval<std::hash<Key>>()(std::declval<Key const&>()));
template <class Key>
static NAT GetReturnType(...);
template <class Key>
static std::nullptr_t DoIt() {
static_assert(IsHashable<Key>::value,
"std::hash<Key> does not provide a call operator");
static_assert(
std::is_default_constructible<std::hash<Key>>::value,
"std::hash<Key> must be default constructible when it is enabled");
static_assert(
std::is_copy_constructible<std::hash<Key>>::value,
"std::hash<Key> must be copy constructible when it is enabled");
static_assert(absl::is_copy_assignable<std::hash<Key>>::value,
"std::hash<Key> must be copy assignable when it is enabled");
using ReturnType = decltype(GetReturnType<Key>(0));
static_assert(std::is_same<ReturnType, NAT>::value ||
std::is_same<ReturnType, size_t>::value,
"std::hash<Key> must return size_t");
return nullptr;
}
template <class... Ts>
friend void AssertHashEnabled();
};
template <class... Ts>
inline void AssertHashEnabled() {
using Helper = AssertHashEnabledHelper;
Helper::Sink(Helper::DoIt<Ts>()...);
}
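// Usage note (added for exposition): AssertHashEnabled<Ts...>() is meant to
// be instantiated from a function body; each static_assert in DoIt<Key>()
// then fires with a readable message when std::hash<Key> lacks a call
// operator, is not default/copy constructible or copy assignable, or does
// not return size_t.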
}
namespace swap_internal {
using std::swap;
void swap();
template <class T>
using IsSwappableImpl = decltype(swap(std::declval<T&>(), std::declval<T&>()));
template <class T,
class IsNoexcept = std::integral_constant<
bool, noexcept(swap(std::declval<T&>(), std::declval<T&>()))>>
using IsNothrowSwappableImpl = typename std::enable_if<IsNoexcept::value>::type;
template <class T>
struct IsSwappable
: absl::type_traits_internal::is_detected<IsSwappableImpl, T> {};
template <class T>
struct IsNothrowSwappable
: absl::type_traits_internal::is_detected<IsNothrowSwappableImpl, T> {};
template <class T, absl::enable_if_t<IsSwappable<T>::value, int> = 0>
void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable<T>::value) {
swap(lhs, rhs);
}
using StdSwapIsUnconstrained = IsSwappable<void()>;
}
namespace type_traits_internal {
using swap_internal::IsNothrowSwappable;
using swap_internal::IsSwappable;
using swap_internal::StdSwapIsUnconstrained;
using swap_internal::Swap;
}
#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
(defined(__cpp_impl_trivially_relocatable) || \
(!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__)))
template <class T>
struct is_trivially_relocatable
: std::integral_constant<bool, __is_trivially_relocatable(T)> {};
#else
template <class T>
struct is_trivially_relocatable : std::is_trivially_copyable<T> {};
#endif
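// Added illustration (an assumption, not part of the original header): under
// the fallback branch above, any trivially copyable type is reported as
// trivially relocatable; the builtin branch may additionally accept
// ABSL_ATTRIBUTE_TRIVIAL_ABI types with nontrivial members.
static_assert(is_trivially_relocatable<int>::value,
              "primitives are trivially relocatable under either branch");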
#if defined(ABSL_HAVE_CONSTANT_EVALUATED)
constexpr bool is_constant_evaluated() noexcept {
#ifdef __cpp_lib_is_constant_evaluated
return std::is_constant_evaluated();
#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
return __builtin_is_constant_evaluated();
#endif
}
#endif
namespace type_traits_internal {
template <typename T, typename = void>
struct IsOwnerImpl : std::false_type {
static_assert(std::is_same<T, absl::remove_cvref_t<T>>::value,
"type must lack qualifiers");
};
template <typename T>
struct IsOwnerImpl<
T,
std::enable_if_t<std::is_class<typename T::absl_internal_is_view>::value>>
: absl::negation<typename T::absl_internal_is_view> {};
template <typename T>
struct IsOwner : IsOwnerImpl<T> {};
template <typename T, typename Traits, typename Alloc>
struct IsOwner<std::basic_string<T, Traits, Alloc>> : std::true_type {};
template <typename T, typename Alloc>
struct IsOwner<std::vector<T, Alloc>> : std::true_type {};
template <typename T, typename = void>
struct IsViewImpl : std::false_type {
static_assert(std::is_same<T, absl::remove_cvref_t<T>>::value,
"type must lack qualifiers");
};
template <typename T>
struct IsViewImpl<
T,
std::enable_if_t<std::is_class<typename T::absl_internal_is_view>::value>>
: T::absl_internal_is_view {};
template <typename T>
struct IsView : std::integral_constant<bool, std::is_pointer<T>::value ||
IsViewImpl<T>::value> {};
#ifdef ABSL_HAVE_STD_STRING_VIEW
template <typename Char, typename Traits>
struct IsView<std::basic_string_view<Char, Traits>> : std::true_type {};
#endif
#ifdef __cpp_lib_span
template <typename T>
struct IsView<std::span<T>> : std::true_type {};
#endif
template <typename T, typename U>
using IsLifetimeBoundAssignment =
std::integral_constant<bool, IsView<absl::remove_cvref_t<T>>::value &&
IsOwner<absl::remove_cvref_t<U>>::value>;
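// Exposition-only sketch (the asserts below are added assumptions, not part
// of the original header): IsLifetimeBoundAssignment flags assignments that
// bind a view type (pointer, string_view, span, ...) to an owner type.
static_assert(IsLifetimeBoundAssignment<int*, std::string>::value,
              "owner-to-view assignment is lifetime-bound");
static_assert(!IsLifetimeBoundAssignment<int, int>::value,
              "value-to-value assignment is not lifetime-bound");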
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/meta/type_traits.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif
namespace {
using ::testing::StaticAssertTypeEq;
template <typename T>
using IsOwnerAndNotView =
absl::conjunction<absl::type_traits_internal::IsOwner<T>,
absl::negation<absl::type_traits_internal::IsView<T>>>;
static_assert(IsOwnerAndNotView<std::vector<int>>::value,
"vector is an owner, not a view");
static_assert(IsOwnerAndNotView<std::string>::value,
"string is an owner, not a view");
static_assert(IsOwnerAndNotView<std::wstring>::value,
"wstring is an owner, not a view");
#ifdef ABSL_HAVE_STD_STRING_VIEW
static_assert(!IsOwnerAndNotView<std::string_view>::value,
"string_view is a view, not an owner");
static_assert(!IsOwnerAndNotView<std::wstring_view>::value,
"wstring_view is a view, not an owner");
#endif
template <class T, class U>
struct simple_pair {
T first;
U second;
};
struct Dummy {};
struct ReturnType {};
struct ConvertibleToReturnType {
operator ReturnType() const;
};
struct StructA {};
struct StructB {};
struct StructC {};
struct TypeWithBarFunction {
template <class T,
absl::enable_if_t<std::is_same<T&&, StructA&>::value, int> = 0>
ReturnType bar(T&&, const StructB&, StructC&&) &&;
};
struct TypeWithBarFunctionAndConvertibleReturnType {
template <class T,
absl::enable_if_t<std::is_same<T&&, StructA&>::value, int> = 0>
ConvertibleToReturnType bar(T&&, const StructB&, StructC&&) &&;
};
template <class Class, class... Ts>
using BarIsCallableImpl =
decltype(std::declval<Class>().bar(std::declval<Ts>()...));
template <class Class, class... T>
using BarIsCallable =
absl::type_traits_internal::is_detected<BarIsCallableImpl, Class, T...>;
template <class Class, class... T>
using BarIsCallableConv = absl::type_traits_internal::is_detected_convertible<
ReturnType, BarIsCallableImpl, Class, T...>;
TEST(IsDetectedTest, BasicUsage) {
EXPECT_TRUE((BarIsCallable<TypeWithBarFunction, StructA&, const StructB&,
StructC>::value));
EXPECT_TRUE(
(BarIsCallable<TypeWithBarFunction, StructA&, StructB&, StructC>::value));
EXPECT_TRUE(
(BarIsCallable<TypeWithBarFunction, StructA&, StructB, StructC>::value));
EXPECT_FALSE((BarIsCallable<int, StructA&, const StructB&, StructC>::value));
EXPECT_FALSE((BarIsCallable<TypeWithBarFunction&, StructA&, const StructB&,
StructC>::value));
EXPECT_FALSE((BarIsCallable<TypeWithBarFunction, StructA, const StructB&,
StructC>::value));
}
TEST(IsDetectedConvertibleTest, BasicUsage) {
EXPECT_TRUE((BarIsCallableConv<TypeWithBarFunction, StructA&, const StructB&,
StructC>::value));
EXPECT_TRUE((BarIsCallableConv<TypeWithBarFunction, StructA&, StructB&,
StructC>::value));
EXPECT_TRUE((BarIsCallableConv<TypeWithBarFunction, StructA&, StructB,
StructC>::value));
EXPECT_TRUE((BarIsCallableConv<TypeWithBarFunctionAndConvertibleReturnType,
StructA&, const StructB&, StructC>::value));
EXPECT_TRUE((BarIsCallableConv<TypeWithBarFunctionAndConvertibleReturnType,
StructA&, StructB&, StructC>::value));
EXPECT_TRUE((BarIsCallableConv<TypeWithBarFunctionAndConvertibleReturnType,
StructA&, StructB, StructC>::value));
EXPECT_FALSE(
(BarIsCallableConv<int, StructA&, const StructB&, StructC>::value));
EXPECT_FALSE((BarIsCallableConv<TypeWithBarFunction&, StructA&,
const StructB&, StructC>::value));
EXPECT_FALSE((BarIsCallableConv<TypeWithBarFunction, StructA, const StructB&,
StructC>::value));
EXPECT_FALSE((BarIsCallableConv<TypeWithBarFunctionAndConvertibleReturnType&,
StructA&, const StructB&, StructC>::value));
EXPECT_FALSE((BarIsCallableConv<TypeWithBarFunctionAndConvertibleReturnType,
StructA, const StructB&, StructC>::value));
}
TEST(VoidTTest, BasicUsage) {
StaticAssertTypeEq<void, absl::void_t<Dummy>>();
StaticAssertTypeEq<void, absl::void_t<Dummy, Dummy, Dummy>>();
}
TEST(ConjunctionTest, BasicBooleanLogic) {
EXPECT_TRUE(absl::conjunction<>::value);
EXPECT_TRUE(absl::conjunction<std::true_type>::value);
EXPECT_TRUE((absl::conjunction<std::true_type, std::true_type>::value));
EXPECT_FALSE((absl::conjunction<std::true_type, std::false_type>::value));
EXPECT_FALSE((absl::conjunction<std::false_type, std::true_type>::value));
EXPECT_FALSE((absl::conjunction<std::false_type, std::false_type>::value));
}
struct MyTrueType {
static constexpr bool value = true;
};
struct MyFalseType {
static constexpr bool value = false;
};
TEST(ConjunctionTest, ShortCircuiting) {
EXPECT_FALSE(
(absl::conjunction<std::true_type, std::false_type, Dummy>::value));
EXPECT_TRUE((std::is_base_of<MyFalseType,
absl::conjunction<std::true_type, MyFalseType,
std::false_type>>::value));
EXPECT_TRUE(
(std::is_base_of<MyTrueType,
absl::conjunction<std::true_type, MyTrueType>>::value));
}
TEST(DisjunctionTest, BasicBooleanLogic) {
EXPECT_FALSE(absl::disjunction<>::value);
EXPECT_FALSE(absl::disjunction<std::false_type>::value);
EXPECT_TRUE((absl::disjunction<std::true_type, std::true_type>::value));
EXPECT_TRUE((absl::disjunction<std::true_type, std::false_type>::value));
EXPECT_TRUE((absl::disjunction<std::false_type, std::true_type>::value));
EXPECT_FALSE((absl::disjunction<std::false_type, std::false_type>::value));
}
TEST(DisjunctionTest, ShortCircuiting) {
EXPECT_TRUE(
(absl::disjunction<std::false_type, std::true_type, Dummy>::value));
EXPECT_TRUE((
std::is_base_of<MyTrueType, absl::disjunction<std::false_type, MyTrueType,
std::true_type>>::value));
EXPECT_TRUE((
std::is_base_of<MyFalseType,
absl::disjunction<std::false_type, MyFalseType>>::value));
}
TEST(NegationTest, BasicBooleanLogic) {
EXPECT_FALSE(absl::negation<std::true_type>::value);
EXPECT_FALSE(absl::negation<MyTrueType>::value);
EXPECT_TRUE(absl::negation<std::false_type>::value);
EXPECT_TRUE(absl::negation<MyFalseType>::value);
}
class Trivial {
int n_;
};
struct TrivialDestructor {
~TrivialDestructor() = default;
};
struct NontrivialDestructor {
~NontrivialDestructor() {}
};
struct DeletedDestructor {
~DeletedDestructor() = delete;
};
class TrivialDefaultCtor {
public:
TrivialDefaultCtor() = default;
explicit TrivialDefaultCtor(int n) : n_(n) {}
private:
int n_;
};
class NontrivialDefaultCtor {
public:
NontrivialDefaultCtor() : n_(1) {}
private:
int n_;
};
class DeletedDefaultCtor {
public:
DeletedDefaultCtor() = delete;
explicit DeletedDefaultCtor(int n) : n_(n) {}
private:
int n_;
};
class TrivialMoveCtor {
public:
explicit TrivialMoveCtor(int n) : n_(n) {}
TrivialMoveCtor(TrivialMoveCtor&&) = default;
TrivialMoveCtor& operator=(const TrivialMoveCtor& t) {
n_ = t.n_;
return *this;
}
private:
int n_;
};
class NontrivialMoveCtor {
public:
explicit NontrivialMoveCtor(int n) : n_(n) {}
NontrivialMoveCtor(NontrivialMoveCtor&& t) noexcept : n_(t.n_) {}
NontrivialMoveCtor& operator=(const NontrivialMoveCtor&) = default;
private:
int n_;
};
class TrivialCopyCtor {
public:
explicit TrivialCopyCtor(int n) : n_(n) {}
TrivialCopyCtor(const TrivialCopyCtor&) = default;
TrivialCopyCtor& operator=(const TrivialCopyCtor& t) {
n_ = t.n_;
return *this;
}
private:
int n_;
};
class NontrivialCopyCtor {
public:
explicit NontrivialCopyCtor(int n) : n_(n) {}
NontrivialCopyCtor(const NontrivialCopyCtor& t) : n_(t.n_) {}
NontrivialCopyCtor& operator=(const NontrivialCopyCtor&) = default;
private:
int n_;
};
class DeletedCopyCtor {
public:
explicit DeletedCopyCtor(int n) : n_(n) {}
DeletedCopyCtor(const DeletedCopyCtor&) = delete;
DeletedCopyCtor& operator=(const DeletedCopyCtor&) = default;
private:
int n_;
};
class TrivialMoveAssign {
public:
explicit TrivialMoveAssign(int n) : n_(n) {}
TrivialMoveAssign(const TrivialMoveAssign& t) : n_(t.n_) {}
TrivialMoveAssign& operator=(TrivialMoveAssign&&) = default;
~TrivialMoveAssign() {}
private:
int n_;
};
class NontrivialMoveAssign {
public:
explicit NontrivialMoveAssign(int n) : n_(n) {}
NontrivialMoveAssign(const NontrivialMoveAssign&) = default;
NontrivialMoveAssign& operator=(NontrivialMoveAssign&& t) noexcept {
n_ = t.n_;
return *this;
}
private:
int n_;
};
class TrivialCopyAssign {
public:
explicit TrivialCopyAssign(int n) : n_(n) {}
TrivialCopyAssign(const TrivialCopyAssign& t) : n_(t.n_) {}
TrivialCopyAssign& operator=(const TrivialCopyAssign& t) = default;
~TrivialCopyAssign() {}
private:
int n_;
};
class NontrivialCopyAssign {
public:
explicit NontrivialCopyAssign(int n) : n_(n) {}
NontrivialCopyAssign(const NontrivialCopyAssign&) = default;
NontrivialCopyAssign& operator=(const NontrivialCopyAssign& t) {
n_ = t.n_;
return *this;
}
private:
int n_;
};
class DeletedCopyAssign {
public:
explicit DeletedCopyAssign(int n) : n_(n) {}
DeletedCopyAssign(const DeletedCopyAssign&) = default;
DeletedCopyAssign& operator=(const DeletedCopyAssign&) = delete;
private:
int n_;
};
struct MovableNonCopyable {
MovableNonCopyable() = default;
MovableNonCopyable(const MovableNonCopyable&) = delete;
MovableNonCopyable(MovableNonCopyable&&) = default;
MovableNonCopyable& operator=(const MovableNonCopyable&) = delete;
MovableNonCopyable& operator=(MovableNonCopyable&&) = default;
};
struct NonCopyableOrMovable {
NonCopyableOrMovable() = default;
virtual ~NonCopyableOrMovable() = default;
NonCopyableOrMovable(const NonCopyableOrMovable&) = delete;
NonCopyableOrMovable(NonCopyableOrMovable&&) = delete;
NonCopyableOrMovable& operator=(const NonCopyableOrMovable&) = delete;
NonCopyableOrMovable& operator=(NonCopyableOrMovable&&) = delete;
};
class Base {
public:
virtual ~Base() {}
};
TEST(TypeTraitsTest, TestIsFunction) {
struct Callable {
void operator()() {}
};
EXPECT_TRUE(absl::is_function<void()>::value);
EXPECT_TRUE(absl::is_function<void()&>::value);
EXPECT_TRUE(absl::is_function<void() const>::value);
EXPECT_TRUE(absl::is_function<void() noexcept>::value);
EXPECT_TRUE(absl::is_function<void(...) noexcept>::value);
EXPECT_FALSE(absl::is_function<void (*)()>::value);
EXPECT_FALSE(absl::is_function<void (&)()>::value);
EXPECT_FALSE(absl::is_function<int>::value);
EXPECT_FALSE(absl::is_function<Callable>::value);
}
TEST(TypeTraitsTest, TestRemoveCVRef) {
EXPECT_TRUE(
(std::is_same<typename absl::remove_cvref<int>::type, int>::value));
EXPECT_TRUE(
(std::is_same<typename absl::remove_cvref<int&>::type, int>::value));
EXPECT_TRUE(
(std::is_same<typename absl::remove_cvref<int&&>::type, int>::value));
EXPECT_TRUE((
std::is_same<typename absl::remove_cvref<const int&>::type, int>::value));
EXPECT_TRUE(
(std::is_same<typename absl::remove_cvref<int*>::type, int*>::value));
EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int*>::type,
const int*>::value));
EXPECT_TRUE(
(std::is_same<typename absl::remove_cvref<int[2]>::type, int[2]>::value));
EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int(&)[2]>::type,
int[2]>::value));
EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int(&&)[2]>::type,
int[2]>::value));
EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int[2]>::type,
int[2]>::value));
EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int(&)[2]>::type,
int[2]>::value));
EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int(&&)[2]>::type,
int[2]>::value));
}
#define ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(trait_name, ...) \
EXPECT_TRUE((std::is_same<typename std::trait_name<__VA_ARGS__>::type, \
absl::trait_name##_t<__VA_ARGS__>>::value))
TEST(TypeTraitsTest, TestRemoveCVAliases) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_cv, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_cv, const int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_cv, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_cv, const volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_const, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_const, const int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_const, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_const, const volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_volatile, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_volatile, const int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_volatile, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_volatile, const volatile int);
}
TEST(TypeTraitsTest, TestAddCVAliases) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_cv, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_cv, const int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_cv, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_cv, const volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_const, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_const, const int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_const, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_const, const volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_volatile, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_volatile, const int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_volatile, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_volatile, const volatile int);
}
TEST(TypeTraitsTest, TestReferenceAliases) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_reference, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_reference, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_reference, int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_reference, volatile int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_reference, int&&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_reference, volatile int&&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_lvalue_reference, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_lvalue_reference, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_lvalue_reference, int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_lvalue_reference, volatile int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_lvalue_reference, int&&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_lvalue_reference, volatile int&&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_rvalue_reference, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_rvalue_reference, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_rvalue_reference, int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_rvalue_reference, volatile int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_rvalue_reference, int&&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_rvalue_reference, volatile int&&);
}
TEST(TypeTraitsTest, TestPointerAliases) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_pointer, int*);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_pointer, volatile int*);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_pointer, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(add_pointer, volatile int);
}
TEST(TypeTraitsTest, TestSignednessAliases) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_signed, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_signed, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_signed, unsigned);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_signed, volatile unsigned);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_unsigned, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_unsigned, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_unsigned, unsigned);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(make_unsigned, volatile unsigned);
}
TEST(TypeTraitsTest, TestExtentAliases) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_extent, int[]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_extent, int[1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_extent, int[1][1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_extent, int[][1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_all_extents, int[]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_all_extents, int[1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_all_extents, int[1][1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_all_extents, int[][1]);
}
TEST(TypeTraitsTest, TestDecay) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, const int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, const volatile int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, const int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, volatile int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, const volatile int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, const int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, volatile int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, const volatile int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int[1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int[1][1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int[][1]);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int());
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int(float));
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int(char, ...));
}
struct TypeA {};
struct TypeB {};
struct TypeC {};
struct TypeD {};
template <typename T>
struct Wrap {};
enum class TypeEnum { A, B, C, D };
struct GetTypeT {
template <typename T,
absl::enable_if_t<std::is_same<T, TypeA>::value, int> = 0>
TypeEnum operator()(Wrap<T>) const {
return TypeEnum::A;
}
template <typename T,
absl::enable_if_t<std::is_same<T, TypeB>::value, int> = 0>
TypeEnum operator()(Wrap<T>) const {
return TypeEnum::B;
}
template <typename T,
absl::enable_if_t<std::is_same<T, TypeC>::value, int> = 0>
TypeEnum operator()(Wrap<T>) const {
return TypeEnum::C;
}
} constexpr GetType = {};
TEST(TypeTraitsTest, TestEnableIf) {
EXPECT_EQ(TypeEnum::A, GetType(Wrap<TypeA>()));
EXPECT_EQ(TypeEnum::B, GetType(Wrap<TypeB>()));
EXPECT_EQ(TypeEnum::C, GetType(Wrap<TypeC>()));
}
TEST(TypeTraitsTest, TestConditional) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(conditional, true, int, char);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(conditional, false, int, char);
}
TEST(TypeTraitsTest, TestCommonType) {
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(common_type, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(common_type, int, char);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(common_type, int, char, int);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(common_type, int&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(common_type, int, char&);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(common_type, int, char, int&);
}
TEST(TypeTraitsTest, TestUnderlyingType) {
enum class enum_char : char {};
enum class enum_long_long : long long {};
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(underlying_type, enum_char);
ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(underlying_type, enum_long_long);
}
struct GetTypeExtT {
template <typename T>
absl::result_of_t<const GetTypeT&(T)> operator()(T&& arg) const {
return GetType(std::forward<T>(arg));
}
TypeEnum operator()(Wrap<TypeD>) const { return TypeEnum::D; }
} constexpr GetTypeExt = {};
TEST(TypeTraitsTest, TestResultOf) {
EXPECT_EQ(TypeEnum::A, GetTypeExt(Wrap<TypeA>()));
EXPECT_EQ(TypeEnum::B, GetTypeExt(Wrap<TypeB>()));
EXPECT_EQ(TypeEnum::C, GetTypeExt(Wrap<TypeC>()));
EXPECT_EQ(TypeEnum::D, GetTypeExt(Wrap<TypeD>()));
}
namespace adl_namespace {
struct DeletedSwap {};
void swap(DeletedSwap&, DeletedSwap&) = delete;
struct SpecialNoexceptSwap {
SpecialNoexceptSwap(SpecialNoexceptSwap&&) {}
SpecialNoexceptSwap& operator=(SpecialNoexceptSwap&&) { return *this; }
~SpecialNoexceptSwap() = default;
};
void swap(SpecialNoexceptSwap&, SpecialNoexceptSwap&) noexcept {}
}
TEST(TypeTraitsTest, IsSwappable) {
using absl::type_traits_internal::IsSwappable;
using absl::type_traits_internal::StdSwapIsUnconstrained;
EXPECT_TRUE(IsSwappable<int>::value);
struct S {};
EXPECT_TRUE(IsSwappable<S>::value);
struct NoConstruct {
NoConstruct(NoConstruct&&) = delete;
NoConstruct& operator=(NoConstruct&&) { return *this; }
~NoConstruct() = default;
};
EXPECT_EQ(IsSwappable<NoConstruct>::value, StdSwapIsUnconstrained::value);
struct NoAssign {
NoAssign(NoAssign&&) {}
NoAssign& operator=(NoAssign&&) = delete;
~NoAssign() = default;
};
EXPECT_EQ(IsSwappable<NoAssign>::value, StdSwapIsUnconstrained::value);
EXPECT_FALSE(IsSwappable<adl_namespace::DeletedSwap>::value);
EXPECT_TRUE(IsSwappable<adl_namespace::SpecialNoexceptSwap>::value);
}
TEST(TypeTraitsTest, IsNothrowSwappable) {
using absl::type_traits_internal::IsNothrowSwappable;
using absl::type_traits_internal::StdSwapIsUnconstrained;
EXPECT_TRUE(IsNothrowSwappable<int>::value);
struct NonNoexceptMoves {
NonNoexceptMoves(NonNoexceptMoves&&) {}
NonNoexceptMoves& operator=(NonNoexceptMoves&&) { return *this; }
~NonNoexceptMoves() = default;
};
EXPECT_FALSE(IsNothrowSwappable<NonNoexceptMoves>::value);
struct NoConstruct {
NoConstruct(NoConstruct&&) = delete;
NoConstruct& operator=(NoConstruct&&) { return *this; }
~NoConstruct() = default;
};
EXPECT_FALSE(IsNothrowSwappable<NoConstruct>::value);
struct NoAssign {
NoAssign(NoAssign&&) {}
NoAssign& operator=(NoAssign&&) = delete;
~NoAssign() = default;
};
EXPECT_FALSE(IsNothrowSwappable<NoAssign>::value);
EXPECT_FALSE(IsNothrowSwappable<adl_namespace::DeletedSwap>::value);
EXPECT_TRUE(IsNothrowSwappable<adl_namespace::SpecialNoexceptSwap>::value);
}
TEST(TriviallyRelocatable, PrimitiveTypes) {
static_assert(absl::is_trivially_relocatable<int>::value, "");
static_assert(absl::is_trivially_relocatable<char>::value, "");
static_assert(absl::is_trivially_relocatable<void*>::value, "");
}
TEST(TriviallyRelocatable, UserDefinedTriviallyRelocatable) {
struct S {
int x;
int y;
};
static_assert(absl::is_trivially_relocatable<S>::value, "");
}
TEST(TriviallyRelocatable, UserProvidedMoveConstructor) {
struct S {
S(S&&) {}
};
static_assert(!absl::is_trivially_relocatable<S>::value, "");
}
TEST(TriviallyRelocatable, UserProvidedCopyConstructor) {
struct S {
S(const S&) {}
};
static_assert(!absl::is_trivially_relocatable<S>::value, "");
}
TEST(TriviallyRelocatable, UserProvidedCopyAssignment) {
struct S {
S(const S&) = default;
S& operator=(const S&) {
return *this;
}
};
static_assert(!absl::is_trivially_relocatable<S>::value, "");
}
TEST(TriviallyRelocatable, UserProvidedMoveAssignment) {
struct S {
S(S&&) = default;
S& operator=(S&&) { return *this; }
};
static_assert(!absl::is_trivially_relocatable<S>::value, "");
}
TEST(TriviallyRelocatable, UserProvidedDestructor) {
struct S {
~S() {}
};
static_assert(!absl::is_trivially_relocatable<S>::value, "");
}
#if defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) && \
ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
(defined(__cpp_impl_trivially_relocatable) || \
(!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__)))
TEST(TriviallyRelocatable, TrivialAbi) {
struct ABSL_ATTRIBUTE_TRIVIAL_ABI S {
S(S&&) {}
S(const S&) {}
void operator=(S&&) {}
void operator=(const S&) {}
~S() {}
};
static_assert(absl::is_trivially_relocatable<S>::value, "");
}
#endif
#ifdef ABSL_HAVE_CONSTANT_EVALUATED
constexpr int64_t NegateIfConstantEvaluated(int64_t i) {
if (absl::is_constant_evaluated()) {
return -i;
} else {
return i;
}
}
#endif
TEST(IsConstantEvaluated, is_constant_evaluated) {
#ifdef ABSL_HAVE_CONSTANT_EVALUATED
constexpr int64_t constant = NegateIfConstantEvaluated(42);
EXPECT_EQ(constant, -42);
int64_t now = absl::ToUnixSeconds(absl::Now());
int64_t not_constant = NegateIfConstantEvaluated(now);
EXPECT_EQ(not_constant, now);
static int64_t const_init = NegateIfConstantEvaluated(42);
EXPECT_EQ(const_init, -42);
#else
GTEST_SKIP() << "absl::is_constant_evaluated is not defined";
#endif
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/meta/type_traits.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/meta/type_traits_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
c8b735fe-1061-46a4-98c4-74adb45572e5 | cpp | tensorflow/tensorflow | node_io_dump_rewriter | tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.cc | tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter_test.cc | #include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
absl::StatusOr<std::string> GetDumpDir(absl::string_view dump_dir) {
if (!dump_dir.empty()) return std::string(dump_dir);
const char* prefix = getenv("TF_DUMP_GRAPH_PREFIX");
if (prefix != nullptr) return std::string(prefix);
return errors::InvalidArgument("TF_DUMP_GRAPH_PREFIX not specified");
}
Status InsertDumpOpsForNode(Graph& graph, Node& node,
absl::string_view dump_dir) {
auto insert = [&](bool is_input, const std::vector<const Edge*> edges) {
for (const Edge* edge : edges) {
if (edge->IsControlEdge()) continue;
Node* dump_node;
TF_RETURN_IF_ERROR(
NodeBuilder(absl::StrCat(edge->src()->name(), "/", edge->src_output(),
"/debug_identity"),
"DebugIdentityV3")
.Attr("io_of_node", node.name())
.Attr("is_input", is_input)
.Attr("io_index",
is_input ? edge->dst_input() : edge->src_output())
.Attr("tensor_name",
absl::StrCat(edge->src()->name(), ":", edge->src_output()))
.Attr("debug_urls", {absl::StrCat("file:
.Input(edge->src(), edge->src_output())
.Finalize(&graph, &dump_node));
TF_RETURN_IF_ERROR(
graph.UpdateEdge(dump_node, 0, edge->dst(), edge->dst_input()));
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(insert(true,
{node.in_edges().begin(), node.in_edges().end()}));
TF_RETURN_IF_ERROR(insert(
false, {node.out_edges().begin(), node.out_edges().end()}));
return absl::OkStatus();
}
}
Status InsertDumpOps(Graph& graph,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir) {
TF_ASSIGN_OR_RETURN(auto dir, GetDumpDir(dump_dir));
auto insert = [&](Graph& graph) {
for (Node* node : graph.op_nodes()) {
if (nodes_to_dump.contains(node->name())) {
TF_RETURN_IF_ERROR(InsertDumpOpsForNode(graph, *node, dir));
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(insert(graph));
for (const auto& fname : graph.flib_def().ListFunctionNames()) {
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*graph.flib_def().Find(fname), AttrSlice(), &graph.flib_def(), &fbody));
TF_RETURN_IF_ERROR(insert(*fbody->graph));
FunctionDef new_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*fbody->graph, fname, &new_fdef));
TF_RETURN_IF_ERROR(
graph.mutable_flib_def()->ReplaceFunction(fname, new_fdef));
}
return absl::OkStatus();
}
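// Usage sketch (hypothetical call site, added for illustration only): to tap
// the inputs and outputs of a node named "add" in a Graph g, one would write
//   TF_RETURN_IF_ERROR(InsertDumpOps(g, /*nodes_to_dump=*/{"add"},
//                                    /*dump_dir=*/"/tmp/dump"));
// Each non-control edge of a matched node is rerouted through a
// DebugIdentityV3 op whose debug_urls attribute points at file://<dump_dir>.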
Status InsertDumpOps(MetaGraphDef& meta_graph_def,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir) {
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph({}, meta_graph_def.graph_def(), &graph));
TF_RETURN_IF_ERROR(InsertDumpOps(graph, nodes_to_dump, dump_dir));
graph.ToGraphDef(meta_graph_def.mutable_graph_def());
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <dirent.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/tfrt/saved_model/saved_model.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr absl::string_view kDumpSubDirName = "node-io-dump";
const Node* FindNode(const Graph* graph, absl::string_view node_name) {
for (Node* node : graph->nodes()) {
if (node->name() == node_name) return node;
}
return nullptr;
}
const Node* GetInputNode(const Node* node, size_t index) {
const Node* input_node;
CHECK_OK(node->input_node(index, &input_node));
return input_node;
}
const Node* GetOutputNode(const Node* node, size_t index) {
for (const Edge* edge : node->out_edges()) {
if (edge->src_output() == index) return edge->dst();
}
return nullptr;
}
absl::StatusOr<std::vector<std::string>> GetFilenames(
absl::string_view dump_dir) {
auto dump_sub_dir = absl::StrCat(dump_dir, "/", kDumpSubDirName);
DIR* dir = opendir(dump_sub_dir.data());
if (dir == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("can't open directory: ", dump_sub_dir));
}
std::vector<std::string> step_dirs;
struct dirent* entry;
while ((entry = readdir(dir)) != nullptr) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
if (entry->d_type != DT_DIR) {
return absl::InternalError(absl::StrCat(
"Found non-directory entry under dump_sub_dir: ", entry->d_name));
}
step_dirs.push_back(absl::StrCat(dump_sub_dir, "/", entry->d_name));
}
closedir(dir);
CHECK_EQ(step_dirs.size(), 1);
dir = opendir(step_dirs[0].data());
if (dir == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("can't open directory: ", step_dirs[0]));
}
std::vector<std::string> filenames;
while ((entry = readdir(dir)) != nullptr) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
if (entry->d_type == DT_DIR) {
return absl::InternalError(absl::StrCat(
"Found directory entry under step_dir: ", entry->d_name));
}
filenames.push_back(entry->d_name);
}
closedir(dir);
return filenames;
}
TEST(NodeIoDumpRewriterTest, OnGraph) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input_a = ops::Placeholder(scope.WithOpName("input_a"), DT_INT32);
auto input_b = ops::Placeholder(scope.WithOpName("input_b"), DT_INT32);
auto add = ops::Add(scope.WithOpName("add"), input_a, input_b);
auto output = ops::Identity(scope.WithOpName("output"), add);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnGraph");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(*graph, {"add"}, dump_dir));
auto* node = FindNode(graph.get(), "add");
EXPECT_EQ(node->num_inputs(), 2);
EXPECT_EQ(GetInputNode(node, 0)->name(), "input_a/0/debug_identity");
EXPECT_EQ(GetInputNode(node, 1)->name(), "input_b/0/debug_identity");
EXPECT_EQ(node->num_outputs(), 1);
EXPECT_EQ(GetOutputNode(node, 0)->name(), "add/0/debug_identity");
}
TEST(NodeIoDumpRewriterTest, OnSavedModelV1) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV1");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"Add"}, dump_dir));
auto runtime = DefaultTfrtRuntime(1);
SavedModel::Options options(runtime.get());
options.graph_execution_options.compile_options.enable_grappler = false;
TF_ASSERT_OK_AND_ASSIGN(
auto saved_model,
SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
std::vector<tensorflow::Tensor> inputs;
inputs.push_back(
CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(saved_model->Run({}, "another_toy", inputs, &outputs));
ASSERT_EQ(outputs.size(), 2);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({6}));
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[1]),
::testing::ElementsAreArray({12}));
ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
ASSERT_EQ(filenames.size(), 3);
EXPECT_TRUE(absl::StartsWith(filenames[0], "Add:out:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[1], "Add:in:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[2], "Add:in:1_"));
}
TEST(NodeIoDumpRewriterTest, OnSavedModelV2) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v2");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV2");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"result"}, dump_dir));
auto runtime = DefaultTfrtRuntime(1);
SavedModel::Options options(runtime.get());
options.graph_execution_options.compile_options.enable_grappler = false;
TF_ASSERT_OK_AND_ASSIGN(
auto saved_model,
SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
std::vector<tensorflow::Tensor> inputs;
inputs.push_back(
CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(saved_model->Run({}, "serving_default", inputs, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({6}));
ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
ASSERT_EQ(filenames.size(), 3);
EXPECT_TRUE(absl::StartsWith(filenames[0], "result:out:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[1], "result:in:1_"));
EXPECT_TRUE(absl::StartsWith(filenames[2], "result:in:0_"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03955f20-a5af-4082-9ddf-ddb88325bfea | cpp | tensorflow/tensorflow | tf_op_quant_spec | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
bool IsOpWithDataMovementTrait(Operation* op) {
return isa<TF::IdentityOp, TF::CastOp, TF::ReshapeOp, TF::XlaShardingOp,
TF::GatherOp, TF::GatherV2Op, TF::XlaGatherOp, TF::ExpandDimsOp,
TF::SqueezeOp, TF::TransposeOp>(op);
}
bool IsOpWithQuantizableTrait(Operation* op) {
return isa<TF::XlaConvV2Op, TF::XlaDotV2Op, TF::MatMulOp, TF::Conv2DOp,
TF::GatherOp, TF::GatherV2Op, TF::XlaGatherOp,
TF::ResourceGatherOp, TF::DepthwiseConv2dNativeOp, TF::Conv3DOp,
TF::BatchMatMulV2Op, TF::EinsumOp>(op);
}
bool IsOpWithInt8TypeOperand(Operation* op) {
return (isa<TF::XlaConvV2Op, TF::XlaDotV2Op, TF::XlaGatherOp, TF::GatherOp,
TF::GatherV2Op>(op));
}
bool IsValueWithQuantizablePrecision(Value val) {
auto type = mlir::dyn_cast<ShapedType>(val.getType());
if (!type) return false;
if (type.getElementType().isF32() || type.getElementType().isBF16())
return true;
return false;
}
std::optional<tensorflow::quantization::QuantizationComponentSpec>
GetWeightComponentSpec(
const tensorflow::quantization::QuantizationOptions& quantization_options) {
for (auto& cur_spec : quantization_options.quantization_method()
.quantization_component_specs()) {
if (cur_spec.quantization_component() ==
tensorflow::quantization::QuantizationComponentSpec::COMPONENT_WEIGHT)
return cur_spec;
}
return std::nullopt;
}
std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op) {
auto spec = std::make_unique<OpQuantSpec>();
if (auto call_op = dyn_cast<TF::PartitionedCallOp>(op)) {
StringRef function_name =
mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue();
if (!function_name.starts_with("composite_")) {
return spec;
}
if (function_name.contains("depthwise_conv2d")) {
spec->coeff_op_quant_dim[1] = 3;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("conv2d")) {
spec->coeff_op_quant_dim[1] = 3;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("matmul")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias") ||
function_name.contains("and_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("einsum")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("conv3d")) {
spec->coeff_op_quant_dim[1] = 4;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("batch_matmul")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("gather")) {
spec->coeff_op_quant_dim[0] = -1;
}
for (auto quantizable_operand : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(quantizable_operand.first);
}
}
return spec;
}
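// Illustration (added; values follow from the branches above): for a
// composite function name containing "conv2d" and "with_bias" (e.g. a
// hypothetical composite_conv2d_with_bias_fn), the returned spec holds
//   coeff_op_quant_dim == {{1, 3}} (weight is operand 1, quantized on dim 3),
//   biases_params[2] == {{0, 1}, quant::GetUniformQuantizedTypeForBias}, and
//   quantizable_operands == {1}.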
std::unique_ptr<OpQuantScaleSpec> GetTfQuantScaleSpec(Operation* op) {
auto scale_spec = std::make_unique<OpQuantScaleSpec>();
if (llvm::isa<
TF::AvgPoolOp,
TF::ConcatOp,
TF::ConcatV2Op,
TF::ExpandDimsOp,
TF::IdentityNOp,
TF::IdentityOp,
TF::MaxPoolOp,
TF::PadV2Op,
TF::RankOp,
TF::ReshapeOp,
TF::SelectOp,
TF::SelectV2Op,
TF::ShapeNOp,
TF::ShapeOp,
TF::SizeOp,
TF::SqueezeOp,
TF::TransposeOp
>(op)) {
scale_spec->has_same_scale_requirement = true;
}
return scale_spec;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace mlir::quant {
namespace {
using QuantizationOptions = tensorflow::quantization::QuantizationOptions;
using QuantizationComponentSpec =
tensorflow::quantization::QuantizationComponentSpec;
TEST(TfOpQuantSpecTest, WeightComponentSpecExist) {
QuantizationOptions quant_options;
QuantizationComponentSpec quant_spec;
quant_spec.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
auto mutable_quant_method = quant_options.mutable_quantization_method();
*mutable_quant_method->add_quantization_component_specs() = quant_spec;
auto output = GetWeightComponentSpec(quant_options);
EXPECT_TRUE(output.has_value());
}
TEST(TfOpQuantSpecTest, WeightComponentSpecDoNotExist) {
QuantizationOptions quant_options;
auto output = GetWeightComponentSpec(quant_options);
EXPECT_FALSE(output.has_value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
96e128d6-eefa-45ae-800e-f8e8117dd875 | cpp | google/cel-cpp | function_type | common/types/function_type.cc | common/types/function_type_test.cc | #include <cstddef>
#include <cstring>
#include <string>
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/type.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
struct TypeFormatter {
void operator()(std::string* out, const Type& type) const {
out->append(type.DebugString());
}
};
std::string FunctionDebugString(const Type& result,
absl::Span<const Type> args) {
return absl::StrCat("(", absl::StrJoin(args, ", ", TypeFormatter{}), ") -> ",
result.DebugString());
}
}
namespace common_internal {
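// Note (added for exposition): FunctionTypeData ends in a flexible-array-style
// `args` member, so Create() computes the full footprint as
// offsetof(FunctionTypeData, args) plus (1 + args.size()) Type slots -- slot 0
// holds the result type -- and placement-news the object into arena storage.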
absl::Nonnull<FunctionTypeData*> FunctionTypeData::Create(
absl::Nonnull<google::protobuf::Arena*> arena, const Type& result,
absl::Span<const Type> args) {
return ::new (arena->AllocateAligned(
offsetof(FunctionTypeData, args) + ((1 + args.size()) * sizeof(Type)),
alignof(FunctionTypeData))) FunctionTypeData(result, args);
}
FunctionTypeData::FunctionTypeData(const Type& result,
absl::Span<const Type> args)
: args_size(1 + args.size()) {
this->args[0] = result;
std::memcpy(this->args + 1, args.data(), args.size() * sizeof(Type));
}
}
FunctionType::FunctionType(absl::Nonnull<google::protobuf::Arena*> arena,
const Type& result, absl::Span<const Type> args)
: FunctionType(
common_internal::FunctionTypeData::Create(arena, result, args)) {}
std::string FunctionType::DebugString() const {
return FunctionDebugString(result(), args());
}
TypeParameters FunctionType::GetParameters() const {
ABSL_DCHECK(*this);
return TypeParameters(absl::MakeConstSpan(data_->args, data_->args_size));
}
const Type& FunctionType::result() const {
ABSL_DCHECK(*this);
return data_->args[0];
}
absl::Span<const Type> FunctionType::args() const {
ABSL_DCHECK(*this);
return absl::MakeConstSpan(data_->args + 1, data_->args_size - 1);
}
} | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
TEST(FunctionType, Kind) {
google::protobuf::Arena arena;
EXPECT_EQ(FunctionType(&arena, DynType{}, {BytesType()}).kind(),
FunctionType::kKind);
EXPECT_EQ(Type(FunctionType(&arena, DynType{}, {BytesType()})).kind(),
FunctionType::kKind);
}
TEST(FunctionType, Name) {
google::protobuf::Arena arena;
EXPECT_EQ(FunctionType(&arena, DynType{}, {BytesType()}).name(), "function");
EXPECT_EQ(Type(FunctionType(&arena, DynType{}, {BytesType()})).name(),
"function");
}
TEST(FunctionType, DebugString) {
google::protobuf::Arena arena;
{
std::ostringstream out;
out << FunctionType(&arena, DynType{}, {BytesType()});
EXPECT_EQ(out.str(), "(bytes) -> dyn");
}
{
std::ostringstream out;
out << Type(FunctionType(&arena, DynType{}, {BytesType()}));
EXPECT_EQ(out.str(), "(bytes) -> dyn");
}
}
TEST(FunctionType, Hash) {
google::protobuf::Arena arena;
EXPECT_EQ(absl::HashOf(FunctionType(&arena, DynType{}, {BytesType()})),
absl::HashOf(FunctionType(&arena, DynType{}, {BytesType()})));
}
TEST(FunctionType, Equal) {
google::protobuf::Arena arena;
EXPECT_EQ(FunctionType(&arena, DynType{}, {BytesType()}),
FunctionType(&arena, DynType{}, {BytesType()}));
EXPECT_EQ(Type(FunctionType(&arena, DynType{}, {BytesType()})),
FunctionType(&arena, DynType{}, {BytesType()}));
EXPECT_EQ(FunctionType(&arena, DynType{}, {BytesType()}),
Type(FunctionType(&arena, DynType{}, {BytesType()})));
EXPECT_EQ(Type(FunctionType(&arena, DynType{}, {BytesType()})),
Type(FunctionType(&arena, DynType{}, {BytesType()})));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/function_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/function_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
479417f4-59f9-4d2f-86c8-5736fa232d4f | cpp | tensorflow/tensorflow | spmd_expander | tensorflow/dtensor/mlir/spmd_expander.cc | tensorflow/dtensor/tests/spmd_expander_test.cc | #include "tensorflow/dtensor/mlir/spmd_expander.h"
#include <climits>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/framework/registration/registration.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/dtensor_utils.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
#include "tensorflow/dtensor/mlir/expansions/replicated_spmd_expander.h"
#include "tensorflow/dtensor/mlir/ir/tf_dtensor.h"
#include "tensorflow/dtensor/mlir/layout_parsing.h"
#include "tensorflow/dtensor/mlir/op_utils.h"
#include "tensorflow/dtensor/mlir/shape_utils.h"
#include "tensorflow/dtensor/proto/layout.pb.h"
namespace tensorflow {
namespace dtensor {
namespace {
Status AdjustPartedLayout(const llvm::DenseMap<int, Layout>& input_layouts,
llvm::DenseMap<int, Layout>* computed_layouts) {
bool input_has_parted_layout = false;
for (const auto& input_layout : input_layouts) {
if (input_layout.second.type() == Layout::LayoutType::kParted) {
input_has_parted_layout = true;
break;
}
}
if (input_has_parted_layout) {
for (auto& computed_layout : *computed_layouts) {
TF_ASSIGN_OR_RETURN(Layout parted, computed_layout.second.ToParted());
computed_layout.getSecond() = parted;
}
}
return absl::OkStatus();
}
bool SkipExpansionForPartedLayout(mlir::Operation* op) {
if (llvm::isa<mlir::func::ReturnOp, mlir::tf_device::ReturnOp>(op)) {
return false;
}
auto status_or_input_layouts = ExtractRequiredLayoutFromOperands(op);
if (!status_or_input_layouts.ok()) {
return false;
}
bool operand_uses_parted_layout = false;
for (const auto& layout : status_or_input_layouts.value()) {
if (layout.type() == Layout::LayoutType::kParted) {
operand_uses_parted_layout = true;
break;
}
}
return operand_uses_parted_layout;
}
}
SPMDExpanderRegistry* SPMDExpanderRegistry::Global() {
static SPMDExpanderRegistry* registry = new SPMDExpanderRegistry();
return registry;
}
SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForFullOpName(
const std::string& full_op_name) {
auto key = full_op_name;
auto fn = op_to_propagate_fn_map_.find(key);
if (fn == op_to_propagate_fn_map_.end()) {
if (EnableReplicatedSpmdAsDefault(key)) {
      LOG(WARNING)
          << full_op_name << " is defaulting to ReplicatedOpSPMDExpander. "
          << "This has performance implications as all inputs and outputs "
          << "will be replicated if they are not already. Please file a "
          << "feature request to TF DTensor to implement an efficient "
          << "SPMD for this operation.";
      RegisterPropagateFn(key,
                          std::make_unique<ReplicatedOpSPMDExpander>(true));
return op_to_propagate_fn_map_.find(key)->second.get();
} else {
return nullptr;
}
}
return fn->second.get();
}
SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForOp(
mlir::Operation* op) {
return GetPropagateFnForFullOpName(OpName(op));
}
InitOnStartupMarker SPMDExpanderRegistry::RegisterPropagateFn(
std::string opName, std::unique_ptr<SPMDExpanderBase> prop) {
CHECK(op_to_propagate_fn_map_
.insert_or_assign(opName, std::move(prop))
.second);
return {};
}
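// Registration sketch (hypothetical expander and op name, added for
// illustration): expanders are installed once at startup, e.g.
//   SPMDExpanderRegistry::Global()->RegisterPropagateFn(
//       "tf.MyCustomOp", std::make_unique<MyCustomOpSPMDExpander>());
// GetPropagateFnForFullOpName("tf.MyCustomOp") then returns this expander
// rather than falling back to the replicated default (or nullptr).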
Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op,
mlir::Operation** output) {
TF_ASSIGN_OR_RETURN(std::vector<std::optional<Layout>> computed_layout,
ExtractLayoutFromOp(op));
if (computed_layout.empty() && op->getNumResults() != 0) {
return errors::InvalidArgument(
absl::StrCat("No attached layout found for op : ", OpName(op),
" This might be due to an error in layout propagation.")
.c_str());
}
TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op));
bool skip_expansion_for_parted_layout = SkipExpansionForPartedLayout(op);
if (mesh.IsSingleDevice() || mesh.use_xla_spmd() ||
skip_expansion_for_parted_layout) {
if (skip_expansion_for_parted_layout) {
*output = InferSPMDExpandedLocalShape(op);
} else {
*output = op;
}
SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>(
computed_layout.data(), computed_layout.size()));
return absl::OkStatus();
}
llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> global_output_shapes;
global_output_shapes.reserve(op->getNumResults());
for (auto output_value : op->getResults()) {
auto maybe_ranked =
mlir::dyn_cast<mlir::RankedTensorType>(output_value.getType());
if (llvm::isa<mlir::TF::RestoreV2Op, mlir::TF::DTensorRestoreV2Op>(op) &&
(!maybe_ranked || !maybe_ranked.hasStaticShape()))
continue;
TF_ASSIGN_OR_RETURN(auto global_shape,
ExtractGlobalOutputShape(output_value));
global_output_shapes.emplace_back(llvm::SmallVector<int64_t, 4>{
global_shape.begin(), global_shape.end()});
}
TF_ASSIGN_OR_RETURN(*output, this->ExpandOp(op));
SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>(
computed_layout.data(), computed_layout.size()));
for (const auto& output_layout_and_index :
llvm::enumerate(llvm::zip((*output)->getResults(), computed_layout))) {
const int index = output_layout_and_index.index();
const auto& output_and_layout = output_layout_and_index.value();
auto output_value = std::get<0>(output_and_layout);
auto local_expanded_shape_or_status = GetShapeOfValue(output_value);
if (!local_expanded_shape_or_status.ok()) continue;
const auto local_expanded_shape = local_expanded_shape_or_status.value();
const auto& layout = std::get<1>(output_and_layout);
const auto expected_global_shape =
layout->GlobalShapeFromLocalShape(local_expanded_shape);
for (const auto& expanded_and_true_global_shape :
llvm::zip(global_output_shapes[index], expected_global_shape)) {
const auto expanded_shape = std::get<0>(expanded_and_true_global_shape);
const auto expected_shape = std::get<1>(expanded_and_true_global_shape);
if (expanded_shape <= 0 || expected_shape <= 0) continue;
if (expanded_shape != expected_shape) {
return errors::Internal(
"SPMD expansion resulted in op output inconsistent with the "
"provided layout. Expected shape: <",
absl::StrJoin(expected_global_shape, ","), "> got shape: <",
absl::StrJoin(global_output_shapes[index], ","), ">");
}
}
}
return absl::OkStatus();
}
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward(
mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) {
return errors::Unimplemented(
"ComputeLayoutForward API must be implemented via the subclass.");
}
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward(
mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts,
const llvm::DenseMap<int, Layout>& output_layouts) {
TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op));
if (mesh.IsSingleDevice()) {
TF_ASSIGN_OR_RETURN(
Layout layout,
Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh));
auto layouts = llvm::DenseMap<int, Layout>{};
for (int i = 0; i < op->getNumResults(); ++i) {
layouts.insert({i, layout});
}
return layouts;
}
TF_ASSIGN_OR_RETURN(auto layouts, ComputeLayoutForward(op, input_layouts));
TF_RETURN_IF_ERROR(AdjustPartedLayout(input_layouts, &layouts));
return layouts;
}
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward(
mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts) {
return errors::Unimplemented(
"ComputeLayoutBackward API must be implemented via the subclass.");
}
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward(
mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts,
const llvm::DenseMap<int, Layout>& output_layouts) {
TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op));
if (mesh.IsSingleDevice()) {
TF_ASSIGN_OR_RETURN(
Layout layout,
Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh));
auto layouts = llvm::DenseMap<int, Layout>{};
for (int i = 0; i < op->getNumOperands(); ++i) {
layouts.insert({i, layout});
}
return layouts;
}
return ComputeLayoutBackward(op, output_layouts);
}
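// Runs SPMD expansion on `op` if an expander is registered for it;
// otherwise leaves the op unchanged.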
Status RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output) {
SPMDExpanderBase* expander =
SPMDExpanderRegistry::Global()->GetPropagateFnForOp(op);
if (expander != nullptr) {
return expander->ExpandOpAndSetLayout(op, output);
} else {
VLOG(1) << "No expansion found for " << OpName(op) << "\n";
*output = op;
}
return absl::OkStatus();
}
}
} | #include "tensorflow/dtensor/mlir/spmd_expander.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
namespace tensorflow {
namespace dtensor {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
class DummyExpander : public SPMDExpanderBase {
StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override {
return errors::Unimplemented("");
}
StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
mlir::Operation* op,
const llvm::DenseMap<int, Layout>& input_layouts) override {
return errors::Unimplemented("");
}
StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
mlir::Operation* op,
const llvm::DenseMap<int, Layout>& output_layouts) override {
return errors::Unimplemented("");
}
};
class SPMDExpanderRegistryTest : public ::testing::Test {
public:
SPMDExpanderRegistryTest() {
registry_.RegisterPropagateFn(mlir::TF::AddOp::getOperationName().str(),
std::make_unique<DummyExpander>());
}
protected:
SPMDExpanderRegistry registry_;
};
TEST_F(SPMDExpanderRegistryTest, LookupFromOpName) {
EXPECT_THAT(registry_.GetPropagateFnForFullOpName("tf.Add"), NotNull());
EXPECT_THAT(registry_.GetPropagateFnForFullOpName("Unknown"), IsNull());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/mlir/spmd_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/spmd_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
80a4a321-6ecd-4fb0-b3a8-a72aa0335a14 | cpp | tensorflow/tensorflow | summary_optimizer | tensorflow/core/common_runtime/eager/summary_optimizer.cc | tensorflow/core/common_runtime/eager/summary_optimizer_test.cc | #include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
namespace tensorflow::summary_optimizer {
namespace {
constexpr char kDisableSummariesAtRuntime[] = "disable_summaries_at_runtime";
constexpr char kFlushSummaryWriter[] = "FlushSummaryWriter";
constexpr char kWriteSummary[] = "write_summary";
constexpr char kForwardFunctionName[] = "forward_function_name";
constexpr char kBackwardFunctionName[] = "backward_function_name";
constexpr char kEmptyString[] = "";
using summary_optimizer::internal::NormalizeEdgeName;
using ArgDef = OpDef::ArgDef;
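// Rewrites function-valued attributes on `ndef` to reference the
// summary-stripped variants of the nested functions.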
void UpdateNestedFunctionName(NodeDef& ndef) {
for (auto& [k, v] : *ndef.mutable_attr()) {
if (v.has_func()) {
v.mutable_func()->set_name(StrippedFunctionName(v.func().name()));
} else if (v.list().func_size() > 0) {
for (auto& func : *v.mutable_list()->mutable_func()) {
func.set_name(StrippedFunctionName(func.name()));
}
}
}
}
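// Drops inputs of `ndef` that reference nodes which have been pruned.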
void PruneDeletedInputDeps(
const absl::flat_hash_set<std::string>& nodes_to_keep, NodeDef& ndef) {
auto inputs = ndef.input();
ndef.clear_input();
for (const std::string& input : inputs) {
if (nodes_to_keep.contains(NormalizeEdgeName(input))) {
ndef.add_input(input);
}
}
}
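// Returns a copy of `fdef_with_summaries` with summary-related nodes
// (FlushSummaryWriter ops and nodes under a `write_summary` name scope)
// removed, together with the control returns and control outputs that
// referenced them. Nested function references and the forward/backward
// function name attributes are renamed to their stripped variants.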
FunctionDef StripSummary(const FunctionDef& fdef_with_summaries) {
FunctionDef fdef = fdef_with_summaries;
fdef.mutable_signature()->set_name(
StrippedFunctionName(fdef.signature().name()));
auto nodes = fdef.node_def();
fdef.clear_node_def();
absl::flat_hash_set<std::string> nodes_to_keep;
absl::c_transform(nodes, std::inserter(nodes_to_keep, nodes_to_keep.end()),
[](const NodeDef& node_def) { return node_def.name(); });
absl::c_transform(fdef.signature().input_arg(),
std::inserter(nodes_to_keep, nodes_to_keep.end()),
[](const ArgDef& input_arg) { return input_arg.name(); });
for (const NodeDef& ndef : nodes) {
if (ndef.op() == kFlushSummaryWriter) nodes_to_keep.erase(ndef.name());
for (const auto& substr : absl::StrSplit(ndef.name(), '/')) {
if (substr == kWriteSummary) {
nodes_to_keep.erase(ndef.name());
break;
}
}
}
for (NodeDef& ndef : nodes) {
if (!nodes_to_keep.contains(ndef.name())) continue;
PruneDeletedInputDeps(nodes_to_keep, ndef);
UpdateNestedFunctionName(ndef);
*fdef.add_node_def() = std::move(ndef);
}
auto control_ret = fdef.control_ret();
fdef.clear_control_ret();
for (const auto& [signature_node_name, node_name] : control_ret) {
if (!nodes_to_keep.contains(NormalizeEdgeName(node_name))) continue;
fdef.mutable_control_ret()->insert({signature_node_name, node_name});
}
auto control_outputs = fdef.signature().control_output();
fdef.mutable_signature()->clear_control_output();
for (const std::string& control_output : control_outputs) {
if (!fdef.control_ret().contains(control_output)) continue;
fdef.mutable_signature()->add_control_output(control_output);
}
for (auto& [k, v] : *fdef.mutable_attr()) {
if (k == kForwardFunctionName || k == kBackwardFunctionName) {
v.set_s(StrippedFunctionName(v.s()));
}
if (k == kDisableSummariesAtRuntime) v.clear_list();
}
return fdef;
}
}
namespace internal {
std::string NormalizeEdgeName(absl::string_view name) {
std::vector<std::string> edge_name =
absl::StrSplit(name, absl::ByAnyChar("^:"));
return edge_name[0].empty() ? edge_name[1] : edge_name[0];
}
}
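// Returns the name of the input argument that gates summary execution and
// its recorded boolean value, as stored in the
// `disable_summaries_at_runtime` attribute. Returns {"", false} when the
// attribute is absent or malformed.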
std::pair<absl::string_view, bool> GetDisableSummariesInputArg(
const FunctionDef& fdef) {
auto it = fdef.attr().find(kDisableSummariesAtRuntime);
if (it == fdef.attr().end()) return {kEmptyString, false};
if (it->second.has_list()) {
const auto& list = it->second.list();
if (list.s_size() == 1 && list.b_size() == 1) {
return {list.s(0), list.b(0)};
}
}
return {kEmptyString, false};
}
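// Returns summary-stripped copies of `fdef` and of every function it
// reaches through `flib`. Returns an empty vector when `fdef` does not opt
// in via the `disable_summaries_at_runtime` attribute.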
std::vector<FunctionDef> StripSummaries(const FunctionDef& fdef,
const FunctionLibraryDefinition& flib) {
std::vector<FunctionDef> results;
if (GetDisableSummariesInputArg(fdef).first.empty()) return results;
results.push_back(StripSummary(fdef));
FunctionLibraryDefinition reachable_library = flib.ReachableDefinitions(fdef);
for (const std::string& fname : reachable_library.ListFunctionNames()) {
auto* nested_fdef = flib.Find(fname);
if (nested_fdef == nullptr) continue;
results.push_back(StripSummary(*nested_fdef));
}
return results;
}
std::string StrippedFunctionName(absl::string_view fname) {
return absl::StrCat(fname, "__instance__no_summaries");
}
} | #include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include <algorithm>
#include <string>
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::tensorflow::summary_optimizer::GetDisableSummariesInputArg;
using ::tensorflow::summary_optimizer::StrippedFunctionName;
using ::tensorflow::summary_optimizer::StripSummaries;
using ::tensorflow::summary_optimizer::internal::NormalizeEdgeName;
using ::tsl::protobuf::TextFormat;
using ::tsl::protobuf::util::MessageDifferencer;
template <typename T>
void CompareProto(const T& expected, const std::string& text_proto) {
T proto;
ASSERT_TRUE(TextFormat::ParseFromString(text_proto, &proto));
MessageDifferencer differencer;
EXPECT_TRUE(differencer.Compare(expected, proto));
}
TEST(SummaryOptimizerInternal, NormalizesEdgeName) {
EXPECT_EQ(NormalizeEdgeName("include_summary"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary:0"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary/identity:0"),
"include_summary/identity");
}
TEST(SummaryOptimizer, GetsDisableSummariesInputArg) {
FunctionDef fdef;
auto input_arg = GetDisableSummariesInputArg(fdef);
EXPECT_EQ(input_arg.first, "");
EXPECT_FALSE(input_arg.second);
AttrValue attr_val;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
list { s: "remove_summary" b: true }
)pb",
&attr_val));
fdef.mutable_attr()->insert({"disable_summaries_at_runtime", attr_val});
input_arg = GetDisableSummariesInputArg(fdef);
EXPECT_EQ(input_arg.first, "remove_summary");
EXPECT_TRUE(input_arg.second);
}
TEST(SummaryOptimizer, StripsSummaries) {
FunctionDef fdef;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
signature {
name: "train" # Function name should be updated.
input_arg: { name: "include_summaries" }
control_output: "out_pruned" # Control output should be pruned
# because it was pruned from
# `control_ret`.
control_output: "out"
}
node_def { name: "x" }
node_def {
name: "write_summary/Identity"
} # Node should get pruned based on name.
node_def {
name: "Identity/x"
input: "write_summary/Identity" # Summary scope input should get
# pruned.
input: "x"
}
node_def {
name: "nested_fn"
op: "PartitionedCall"
attr {
key: "f"
value: { func: { name: "nested_fn" } }
}
}
node_def {
name: "list_of_nested_fns"
op: "SomeCustomOp"
attr {
key: "functions"
value: {
list: {
func: { name: "nested_fn2" }
func: { name: "nested_fn3" }
}
}
}
}
node_def {
op: "FlushSummaryWriter"
} # Node should get pruned based on op.
control_ret {
key: "out_pruned",
value: "write_summary/Identity:0"
} # Control return should get pruned because node was pruned.
control_ret { key: "out", value: "Identity/x" }
attr {
key: "forward_function_name"
value: {
s: "__inference_train_1"
} # Forward function name should be updated.
}
attr {
key: "backward_function_name"
value: {
s: "__inference_train_2"
} # Backward function name should be updated.
}
attr {
key: "disable_summaries_at_runtime"
value: { list { s: "include_summaries" b: false } }
}
)pb",
&fdef));
FunctionDef nested_fdef;
nested_fdef.mutable_signature()->set_name("nested_fn");
FunctionDef nested_fdef2;
nested_fdef2.mutable_signature()->set_name("nested_fn2");
FunctionDef nested_fdef3;
nested_fdef3.mutable_signature()->set_name("nested_fn3");
FunctionLibraryDefinition flib(OpRegistry::Global());
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef2));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef3));
std::vector<FunctionDef> stripped_fdefs = StripSummaries(fdef, flib);
ASSERT_EQ(stripped_fdefs.size(), 4);
struct {
bool operator()(const FunctionDef& lhs, const FunctionDef& rhs) const {
return lhs.signature().name() > rhs.signature().name();
}
} fdefOrdering;
std::sort(stripped_fdefs.begin(), stripped_fdefs.end(), fdefOrdering);
CompareProto(stripped_fdefs[0], R"pb(
signature {
name: "train__instance__no_summaries"
input_arg: { name: "include_summaries" }
control_output: "out"
}
node_def { name: "x" }
node_def { name: "Identity/x" input: "x" }
node_def {
name: "nested_fn"
op: "PartitionedCall"
attr {
key: "f"
value: { func: { name: "nested_fn__instance__no_summaries" } }
}
}
node_def {
name: "list_of_nested_fns"
op: "SomeCustomOp"
attr {
key: "functions"
value: {
list: {
func: { name: "nested_fn2__instance__no_summaries" }
func: { name: "nested_fn3__instance__no_summaries" }
}
}
}
}
control_ret { key: "out", value: "Identity/x" }
attr {
key: "forward_function_name",
value: { s: "__inference_train_1__instance__no_summaries" }
}
attr {
key: "backward_function_name",
value: { s: "__inference_train_2__instance__no_summaries" }
}
attr {
key: "disable_summaries_at_runtime"
value {}
}
)pb");
CompareProto(stripped_fdefs[1], R"pb(
signature { name: "nested_fn__instance__no_summaries" }
)pb");
CompareProto(stripped_fdefs[2], R"pb(
signature { name: "nested_fn3__instance__no_summaries" }
)pb");
CompareProto(stripped_fdefs[3], R"pb(
signature { name: "nested_fn2__instance__no_summaries" }
)pb");
}
TEST(SummaryOptimizer, DoesNotStripSummariesWhenNotEnabled) {
FunctionDef fdef;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
signature { name: "train" }
attr {
key: "disable_summaries_at_runtime",
value: {}
}
)pb",
&fdef));
FunctionLibraryDefinition flib(OpRegistry::Global());
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
EXPECT_TRUE(StripSummaries(fdef, flib).empty());
fdef.clear_attr();
TF_ASSERT_OK(flib.RemoveFunction("train"));
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
EXPECT_TRUE(StripSummaries(fdef, flib).empty());
}
TEST(SummaryOptimizer, GeneratesNewFunctionName) {
EXPECT_EQ(StrippedFunctionName("train"), "train__instance__no_summaries");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/summary_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/summary_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b915529b-6b60-4540-9f51-ef7f6ea5b06a | cpp | tensorflow/tensorflow | gpu_indexing_performance_model | third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model.cc | third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model_test.cc | #include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "llvm/Support/MathExtras.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/triton.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/coalescing_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/triton_emitter_constraints.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
struct OperandReadInfo {
int64_t total_bytes_read = 0;
  bool is_coalesced = true;
};
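// Returns the number of elements in a tile after rounding each dimension up
// to the next power of two.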
int64_t GetPaddedTileSize(absl::Span<int64_t const> tile_sizes) {
int64_t result = 1;
for (int64_t tile_size : tile_sizes) {
result *= llvm::PowerOf2Ceil(tile_size);
}
return result;
}
bool DoesTileFitInRegisters(int64_t tile_size,
                            const se::DeviceDescription& device_info) {
constexpr double kFractionOfRegistersAvailableToStoreTile = 0.4;
return tile_size <= kFractionOfRegistersAvailableToStoreTile *
device_info.registers_per_block_limit();
}
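// Heuristic mapping from the largest live tile size to the number of warps
// used to process it.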
int64_t GetNumWarps(int64_t tile_size) {
if (tile_size <= 512) return 1;
if (tile_size <= 1024) return 2;
if (tile_size <= 16384) return 4;
if (tile_size <= 32768) return 8;
if (tile_size <= 65536) return 16;
return 32;
}
}
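// Estimates FLOPs per output element of `instr`. Pure data-movement ops
// cost zero, elementwise ops use the per-element cost table, reductions
// amortize the reducer cost over the reduction factor, and everything else
// falls back to GpuHloCostAnalysis.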
int64_t GpuPerformanceModelWithIndexingAnalysis::FlopsPerElement(
const HloInstruction* instr) {
switch (instr->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kConstant:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kIota:
case HloOpcode::kPad:
case HloOpcode::kParameter:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
return 0;
default:
break;
};
if (instr->IsElementwise()) {
return cost_analysis_.GetFlopsPerElementwiseOpElement(
instr->shape().element_type(), instr->opcode());
}
if (instr->opcode() == HloOpcode::kReduce) {
int64_t flops_per_reduce_computation = 0;
for (const HloInstruction* reducer_instr :
instr->called_computations()[0]->instructions()) {
flops_per_reduce_computation += FlopsPerElement(reducer_instr);
}
auto operand_shape = instr->operand(0)->shape();
auto output_shape = instr->shape().IsArray()
? instr->shape()
: instr->shape().tuple_shapes(0);
int64_t reduction_factor = ShapeUtil::ElementsIn(operand_shape) /
ShapeUtil::ElementsIn(output_shape);
return (reduction_factor - 1) * flops_per_reduce_computation;
}
TF_CHECK_OK(
cost_analysis_.RevisitInstruction(const_cast<HloInstruction*>(instr)));
return cost_analysis_.flop_count(*instr) /
ShapeUtil::ElementsInRecursive(instr->shape());
}
int64_t GpuPerformanceModelWithIndexingAnalysis::GetShapeSizeRecursive(
const Shape& shape) const {
CHECK(shape.IsArray() || shape.IsTuple());
if (shape.IsArray()) {
return shape_size_(shape);
}
int64_t total_size = 0;
for (const auto& element_shape : shape.tuple_shapes()) {
total_size += GetShapeSizeRecursive(element_shape);
}
return total_size;
}
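// Returns the number of index points covered by `indexing_map` (the product
// of its dimension and symbol ranges). Falls back to the element count of
// `instr`'s shape when the map is undefined and to 0 when it is known to be
// empty.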
int64_t GetIterationSpaceSize(const IndexingMap& indexing_map,
const HloInstruction* instr) {
if (indexing_map.IsUndefined()) {
return ShapeUtil::ElementsInRecursive(instr->shape());
}
if (indexing_map.IsKnownEmpty()) {
return 0;
}
auto get_ranges_iteration_space_size =
[](const std::vector<Interval>& ranges) {
int64_t num_iters = 1;
for (const Interval& range : ranges) {
num_iters *= range.upper - range.lower + 1;
}
return num_iters;
};
return get_ranges_iteration_space_size(indexing_map.GetSymbolBounds()) *
get_ranges_iteration_space_size(indexing_map.GetDimensionBounds());
}
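// Estimates fusion run time from the grouped output-to-input indexing:
// operands outside the fusion contribute bytes read, instructions inside it
// contribute FLOPs, each scaled by the iteration-space size of the
// corresponding indexing map.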
EstimateRunTimeData
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForFusion(
const HloFusionAnalysis& fusion_analysis, bool is_coalesced) {
auto& fusion_adaptor = fusion_analysis.fusion();
VLOG(5) << "EstimateRunTimeForFusion: " << fusion_adaptor.ToString();
auto roots = fusion_adaptor.GetRoots();
CHECK_EQ(roots.size(), 1)
<< "Indexing cost model doesn't support multi-output fusions.";
auto root_shape = roots.front().shape();
LaunchDimensions launch_dimensions =
EstimateFusionLaunchDimensions(fusion_analysis);
int64_t num_blocks = launch_dimensions.num_blocks();
auto grouped_fusion_indexing = ComputeGroupedOutputToInputIndexing(
fusion_adaptor, roots[0], mlir_context_);
int64_t flops = 0;
int64_t bytes_read = 0;
absl::Duration read_time = absl::ZeroDuration();
for (const auto& [instr, indexing_maps] : grouped_fusion_indexing) {
VLOG(10) << "instr: " << instr->name();
bool is_operand = !fusion_adaptor.ContainsInstruction(instr);
auto element_type = instr->shape().element_type();
int64_t n_bytes_total = 0;
for (const auto& indexing_map : indexing_maps) {
VLOG(10) << indexing_map;
int64_t num_iters = GetIterationSpaceSize(indexing_map, instr);
if (is_operand) {
int64_t type_size = ShapeUtil::ByteSizeOfPrimitiveType(element_type);
n_bytes_total += type_size * num_iters;
} else {
int64_t flops_per_element = FlopsPerElement(instr);
flops += flops_per_element * num_iters;
}
}
if (is_operand) {
int64_t operand_size = shape_size_(instr->shape());
int64_t n_bytes_net = std::min(operand_size, n_bytes_total);
bytes_read += n_bytes_total;
VLogOperandRead(instr, n_bytes_total, n_bytes_net, is_coalesced);
read_time +=
ReadTimeWithDRAMHeuristic(*device_info_, num_blocks, n_bytes_net,
n_bytes_total, element_type, is_coalesced);
}
}
int64_t bytes_written = GetShapeSizeRecursive(root_shape);
absl::Duration compute_time =
ComputeTime(*device_info_, flops, num_blocks,
launch_dimensions.num_threads_per_block());
absl::Duration write_time = WriteTime(*device_info_, bytes_written);
absl::Duration memory_access_time = read_time + write_time;
absl::Duration exec_time = CombineComputeAndMemoryAccessTime(
compute_time, memory_access_time,
GpuPerformanceModelOptions::PriorityFusion());
EstimateRunTimeData runtime_data = {flops, bytes_read, bytes_written,
read_time, write_time, compute_time,
exec_time};
VLOG(3) << "Runtime data for HLO fusion: " << fusion_adaptor.ToString()
<< "\n"
<< launch_dimensions.ToString() << "\n"
<< runtime_data.ToString();
return runtime_data;
}
EstimateRunTimeData
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForInstruction(
const HloInstruction* producer) {
if (producer->opcode() == HloOpcode::kBitcast) {
return EstimateRunTimeData::Zero();
}
auto fusion_analysis = HloFusionAnalysis::Create(*producer, *device_info_);
bool is_coalesced = IsReadCoalescedHeuristic(
fusion_analysis.GetEmitterFusionKind(), producer);
return EstimateRunTimeForFusion(fusion_analysis, is_coalesced);
}
EstimateRunTimeData
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer) {
auto fusion_analysis =
HloFusionAnalysis::Create(*producer, *consumer, *device_info_);
bool is_coalesced = IsReadCoalescedHeuristic(
fusion_analysis.GetEmitterFusionKind(), producer, consumer);
return EstimateRunTimeForFusion(fusion_analysis, is_coalesced);
}
GpuPerformanceModelWithIndexingAnalysis::RunTimes
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimes(
const HloInstruction* producer,
absl::Span<const HloInstruction* const> fused_consumers) {
auto producer_runtime = EstimateRunTimeForInstruction(producer);
absl::Duration time_unfused =
kKernelLaunchOverhead * (fused_consumers.size() + 1) +
producer_runtime.exec_time;
absl::Duration time_fused = kKernelLaunchOverhead * fused_consumers.size();
for (const auto& consumer : fused_consumers) {
time_unfused += EstimateRunTimeForInstruction(consumer).exec_time;
time_fused +=
EstimateRunTimeForProducerConsumer(producer, consumer).exec_time;
}
return {time_unfused, time_fused};
}
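// Estimates the run time of a tiled HLO computation. Compute time is based
// on padded tile sizes, read time on per-operand bytes with a coalescing
// heuristic (AND-ed across tiles of the same operand), and write time on
// the root shape. Returns an infinite estimate if a padded tile does not
// fit in registers.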
absl::StatusOr<EstimateRunTimeData>
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTiledHloComputation(
const HloFusionAdaptor& fusion_adaptor,
const TiledHloComputation& tiled_hlo_computation,
const LaunchDimensions& launch_dimensions) {
absl::flat_hash_map<const HloInstruction*, OperandReadInfo> n_bytes_total_map;
int64_t flops = 0;
int64_t bytes_read = 0;
int64_t num_blocks = launch_dimensions.num_blocks();
for (const auto& tiled_hlo : tiled_hlo_computation.instructions()) {
int64_t padded_tile_size = GetPaddedTileSize(tiled_hlo->tile_sizes());
    if (!DoesTileFitInRegisters(padded_tile_size, *device_info_)) {
return EstimateRunTimeData::Infinite();
}
const HloInstruction* hlo = tiled_hlo->hlo();
if (fusion_adaptor.ContainsInstruction(hlo)) {
if (hlo->opcode() == HloOpcode::kConcatenate) {
return absl::FailedPreconditionError(
"Concatenate is not supported by the indexing cost model.");
}
int64_t num_elements = num_blocks * padded_tile_size;
flops += FlopsPerElement(hlo) * num_elements;
} else {
int64_t tile_size = Product(tiled_hlo->tile_sizes());
int64_t num_elements = num_blocks * tile_size;
int64_t element_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(hlo->shape().element_type());
int64_t tile_bytes_read = element_type_size * num_elements;
bytes_read += tile_bytes_read;
bool is_coalesced =
IsTiledReadCoalescedHeuristic(*tiled_hlo, *device_info_);
OperandReadInfo& operand_read_info = n_bytes_total_map[hlo];
operand_read_info.total_bytes_read += tile_bytes_read;
operand_read_info.is_coalesced &= is_coalesced;
}
}
absl::Duration read_time = absl::ZeroDuration();
for (const auto& [hlo, operand_read_info] : n_bytes_total_map) {
int64_t operand_size = shape_size_(hlo->shape());
int64_t n_bytes_net =
std::min(operand_size, operand_read_info.total_bytes_read);
read_time +=
ReadTimeWithDRAMHeuristic(*device_info_, num_blocks, n_bytes_net,
operand_read_info.total_bytes_read,
hlo->shape().element_type(),
operand_read_info.is_coalesced);
}
int64_t bytes_written =
GetShapeSizeRecursive(tiled_hlo_computation.GetRoot()->hlo()->shape());
absl::Duration compute_time =
ComputeTime(*device_info_, flops, launch_dimensions.num_blocks(),
launch_dimensions.num_threads_per_block());
absl::Duration write_time = WriteTime(*device_info_, bytes_written);
absl::Duration memory_access_time = read_time + write_time;
absl::Duration exec_time = CombineComputeAndMemoryAccessTime(
compute_time, memory_access_time,
GpuPerformanceModelOptions::PriorityFusion());
return EstimateRunTimeData{flops,
bytes_read,
bytes_written,
read_time,
write_time,
compute_time,
exec_time};
}
absl::StatusOr<EstimateRunTimeData>
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTiledFusion(
const HloFusionAdaptor& fusion_adaptor,
const LaunchDimensions& launch_dimensions,
absl::Span<const int64_t> tile_sizes) {
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(
fusion_adaptor, mlir_context_,
nullptr);
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&analysis_or_error)) {
return absl::FailedPreconditionError(absl::StrCat(
"SymbolicTileAnalysis failed. ", fusion_decision->Explain()));
}
SymbolicTileAnalysis analysis =
std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
TF_ASSIGN_OR_RETURN(TiledHloComputation tiled_hlo_computation,
analysis.ComputeTiledHloInstructions(tile_sizes));
return EstimateRunTimeForTiledHloComputation(
fusion_adaptor, tiled_hlo_computation, launch_dimensions);
}
absl::StatusOr<EstimateRunTimeData>
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTriton(
const HloInstruction* producer, const HloInstruction* consumer) {
const auto& fusion_analysis =
(consumer == nullptr) ? fusion_analysis_cache_->Get(*producer)
: fusion_analysis_cache_->Get(*producer, *consumer);
auto launch_config = TritonFusion(fusion_analysis).launch_config();
if (!launch_config.has_value()) {
return absl::InvalidArgumentError(
"Could not get launch config for Triton fusion.");
}
return EstimateRunTimeForTiledFusion(
fusion_analysis.fusion(), launch_config->launch_dimensions,
launch_config->block_level_parameters.output_tile_sizes);
}
LaunchDimensions
GpuPerformanceModelWithIndexingAnalysis::GetLaunchDimensionsForTiledFusion(
const TiledHloComputation& tiled_hlo_computation) {
int64_t num_blocks = tiled_hlo_computation.num_output_tiles();
int64_t largest_live_tile_size = 1;
for (const auto& tiled_hlo : tiled_hlo_computation.instructions()) {
largest_live_tile_size = std::max(
largest_live_tile_size, GetPaddedTileSize(tiled_hlo->tile_sizes()));
}
int64_t num_warps = GetNumWarps(largest_live_tile_size);
return {static_cast<uint64_t>(num_blocks),
static_cast<uint64_t>(num_warps * WarpSize())};
}
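// Enumerates the tilings proposed by SymbolicTileAnalysis (subject to
// Triton emitter constraints) and returns the one with the lowest estimated
// execution time, or a FusionDecision when no valid tiling exists.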
absl::StatusOr<TiledRunTimeDataOrError>
GpuPerformanceModelWithIndexingAnalysis::TryFindBestTilingForFusion(
const HloFusionAdaptor& fusion_adaptor) {
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(
fusion_adaptor, mlir_context_,
TritonEmitterConstraints::GetBuilder(*device_info_));
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&analysis_or_error)) {
return *fusion_decision;
}
SymbolicTileAnalysis analysis =
std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
TF_ASSIGN_OR_RETURN(auto tilings, analysis.GetGoodTilings());
std::optional<TiledRunTimeData> best_tiled_run_time_data;
for (const auto& tiling : tilings) {
TF_ASSIGN_OR_RETURN(TiledHloComputation tiled_hlo_computation,
analysis.ComputeTiledHloInstructions(tiling));
LaunchDimensions launch_dimensions =
GetLaunchDimensionsForTiledFusion(tiled_hlo_computation);
TF_ASSIGN_OR_RETURN(
EstimateRunTimeData estimate_run_time_data,
EstimateRunTimeForTiledHloComputation(
fusion_adaptor, tiled_hlo_computation, launch_dimensions));
if (!best_tiled_run_time_data.has_value() ||
estimate_run_time_data.exec_time <
best_tiled_run_time_data->runtime_data.exec_time) {
BlockLevelParameters block_level_parameters;
block_level_parameters.output_tile_sizes =
std::vector<int64_t>(tiling.begin(), tiling.end());
block_level_parameters.num_warps =
launch_dimensions.num_threads_per_block() / WarpSize();
best_tiled_run_time_data =
TiledRunTimeData{estimate_run_time_data, block_level_parameters};
}
}
if (!best_tiled_run_time_data.has_value()) {
return FusionDecision::Forbid("No valid tilings found.");
}
return *best_tiled_run_time_data;
}
}
} | #include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include <cstdint>
#include <memory>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class GpuIndexingPerformanceModelTest : public HloTestBase {
public:
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
mlir::MLIRContext mlir_context_;
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
HloFusionAnalysisCache fusion_analysis_cache_{device_info_};
GpuPerformanceModelWithIndexingAnalysis indexing_cost_model_{
&device_info_, &fusion_analysis_cache_, ShapeSizeBytesFunction(),
&mlir_context_};
GpuIndexingPerformanceModelTest() : HloTestBase() {}
};
TEST_F(GpuIndexingPerformanceModelTest, BroadcastElementwise) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule extracted
ENTRY entry_computation {
param_0 = f32[32]{0} parameter(0)
broadcast = f32[32,1,768]{2,1,0} broadcast(param_0), dimensions={0}
param_1 = f32[32,1,768]{2,1,0} parameter(1)
ROOT multiply = f32[32,1,768]{2,1,0} multiply(broadcast, param_1)
}
)"));
auto producer =
module->entry_computation()->GetInstructionWithName("broadcast");
auto consumer =
module->entry_computation()->GetInstructionWithName("multiply");
auto runtime_data = indexing_cost_model_.EstimateRunTimeForProducerConsumer(
producer, consumer);
EXPECT_EQ(runtime_data.flops, 73728);
EXPECT_EQ(runtime_data.bytes_written, 98304);
EXPECT_NEAR(absl::ToInt64Nanoseconds(runtime_data.write_time), 128, 2);
EXPECT_NEAR(absl::ToInt64Nanoseconds(runtime_data.exec_time), 267, 2);
}
TEST_F(GpuIndexingPerformanceModelTest, Bitcast) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule m
ENTRY entry_computation {
param_0 = bf16[4,8,65,128]{3,2,1,0} parameter(0)
ROOT bitcast = bf16[8,4,65,128]{3,2,0,1} bitcast(param_0)
}
)"));
auto instruction =
module->entry_computation()->GetInstructionWithName("bitcast");
auto runtime_data =
indexing_cost_model_.EstimateRunTimeForInstruction(instruction);
EXPECT_EQ(runtime_data.flops, 0);
EXPECT_EQ(runtime_data.bytes_written, 0);
EXPECT_EQ(runtime_data.write_time, absl::ZeroDuration());
EXPECT_EQ(runtime_data.exec_time, absl::ZeroDuration());
}
TEST_F(GpuIndexingPerformanceModelTest, Reduce) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add.0 = f32[] add(param_0, param_1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(param_0.3, constant), dimensions={1}, to_apply=add
}
)"));
auto instruction = module->entry_computation()->root_instruction();
auto runtime_data =
indexing_cost_model_.EstimateRunTimeForInstruction(instruction);
EXPECT_EQ(runtime_data.flops, 3744);
EXPECT_EQ(runtime_data.bytes_written, 128);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.write_time), 0, 1);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.exec_time), 29, 1);
}
TEST_F(GpuIndexingPerformanceModelTest, VariadicReduce) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[] parameter(3)
add.0 = f32[] add(param_0, param_2)
add.1 = f32[] add(param_1, param_3)
ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
param_1.3 = f32[32,40]{1,0} parameter(1)
param_2.2 = f32[] parameter(2)
constant = f32[] constant(0)
ROOT reduce = (f32[32]{0}, f32[32]{0}) reduce(param_0.3, param_1.3, param_2.2, constant), dimensions={1}, to_apply=add
}
)"));
auto instruction = module->entry_computation()->root_instruction();
auto runtime_data =
indexing_cost_model_.EstimateRunTimeForInstruction(instruction);
EXPECT_EQ(runtime_data.flops, 7488);
EXPECT_EQ(runtime_data.bytes_written, 256);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.write_time), 0, 1);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.exec_time), 58, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
TritonSoftmaxFusionInstructionIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
broadcast_0 = f32[512,911]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[512,911]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[512]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[512,911]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[512,911]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
ROOT triton_softmax = f32[512,911]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","911"],"num_warps":"2"}}}
}
)"));
TF_ASSERT_OK_AND_ASSIGN(auto runtime_data,
indexing_cost_model_.EstimateRunTimeForTriton(
module->entry_computation()->root_instruction()));
constexpr int64_t kParam0SizeBytes = 512 * 911 * 4;
constexpr int64_t kParam1SizeBytes = 911 * 4;
constexpr int64_t kOutputSizeBytes = 512 * 911 * 4;
constexpr int64_t kExpectedBytesRead =
kParam0SizeBytes + 512 * kParam1SizeBytes;
EXPECT_EQ(runtime_data.bytes_read, kExpectedBytesRead);
EXPECT_EQ(runtime_data.bytes_written, kOutputSizeBytes);
EXPECT_NEAR(absl::ToDoubleMicroseconds(runtime_data.exec_time), 5, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
TritonSoftmaxProducerConsumerFusionIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
fusion {
param_0 = f32[512,911] parameter(0)
param_1 = f32[911] parameter(1)
broadcast = f32[512,911] broadcast(param_1), dimensions={1}
ROOT multiply = f32[512,911] multiply(param_0, broadcast)
}
triton_softmax_computation {
param_0 = f32[512,911] parameter(0)
constant_0 = f32[] constant(0)
reduce_0 = f32[512] reduce(param_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[512,911] broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[512,911] multiply(param_0, broadcast_4)
}
ENTRY main {
param_0 = f32[512,911] parameter(0)
param_1 = f32[911] parameter(1)
fusion.1 = f32[512,911] fusion(param_0, param_1), kind=kLoop, calls=fusion
ROOT triton_softmax = f32[512,911] fusion(fusion.1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","911"],"num_warps":"2"}}}
}
)"));
auto consumer = module->entry_computation()->root_instruction();
auto producer = consumer->operand(0);
TF_ASSERT_OK_AND_ASSIGN(
auto runtime_data,
indexing_cost_model_.EstimateRunTimeForTriton(producer, consumer));
constexpr int64_t kParam0SizeBytes = 512 * 911 * 4;
constexpr int64_t kParam1SizeBytes = 911 * 4;
constexpr int64_t kOutputSizeBytes = 512 * 911 * 4;
constexpr int64_t kExpectedBytesRead =
kParam0SizeBytes + 512 * kParam1SizeBytes;
EXPECT_EQ(runtime_data.bytes_read, kExpectedBytesRead);
EXPECT_EQ(runtime_data.bytes_written, kOutputSizeBytes);
EXPECT_NEAR(absl::ToDoubleMicroseconds(runtime_data.exec_time), 5, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
EstimateBestTiling_TritonSoftmax_IsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
broadcast_0 = f32[512,911]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[512,911]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[512]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[512,911]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[512,911]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
ROOT triton_softmax = f32[512,911]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(
auto tiling_result,
indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor));
ASSERT_TRUE(std::holds_alternative<TiledRunTimeData>(tiling_result));
auto tiled_runtime_data = std::get<TiledRunTimeData>(tiling_result);
constexpr int64_t kParam0SizeBytes = 512 * 911 * 4;
constexpr int64_t kParam1SizeBytes = 911 * 4;
constexpr int64_t kOutputSizeBytes = 512 * 911 * 4;
constexpr int64_t kExpectedBytesRead =
kParam0SizeBytes + 128 * kParam1SizeBytes;
EXPECT_THAT(tiled_runtime_data.block_level_parameters.output_tile_sizes,
ElementsAre(4, 911));
EXPECT_EQ(tiled_runtime_data.block_level_parameters.num_warps, 4);
EXPECT_EQ(tiled_runtime_data.runtime_data.bytes_read, kExpectedBytesRead);
EXPECT_EQ(tiled_runtime_data.runtime_data.bytes_written, kOutputSizeBytes);
EXPECT_NEAR(
absl::ToDoubleMicroseconds(tiled_runtime_data.runtime_data.exec_time), 5,
1);
}
TEST_F(
GpuIndexingPerformanceModelTest,
EstimateRunTimeForTiledFusion_NumberOfTilesLargerThanInt32Max_IsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule softmax
max_computation {
arg_0 = f16[] parameter(0)
arg_1 = f16[] parameter(1)
ROOT maximum = f16[] maximum(arg_0, arg_1)
}
softmax {
param_0 = f16[131076,16384]{1,0} parameter(0)
constant_neg_inf = f16[] constant(-inf)
reduce = f16[131076]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = f16[131076,16384]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = f16[131076,16384]{1,0} subtract(param_0, broadcast)
}
ENTRY main {
param_0 = f16[131076,16384]{1,0} parameter(0)
ROOT fusion = f16[131076,16384]{1,0} fusion(param_0), kind=kCustom, calls=softmax
})"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
LaunchDimensions launch_dimensions{131076LL * 16384LL, 32};
TF_ASSERT_OK_AND_ASSIGN(
auto runtime_data,
indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, launch_dimensions, {1, 1}));
EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.read_time), 2931, 1);
EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.compute_time), 19, 1);
EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.exec_time), 2932, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
EstimateRunTimeForTiledFusion_ConcatenateOperandIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
fusion {
param_0 = f32[32,64] parameter(0)
param_1 = f32[32,64] parameter(1)
ROOT subtract = f32[32,64] subtract(param_0, param_1)
}
ENTRY main {
param_0 = f32[32,16] parameter(0)
param_1 = f32[32,48] parameter(1)
param_2 = f32[32,64] parameter(2)
concatenate = f32[32,64] concatenate(param_0, param_1), dimensions={1}
ROOT fusion = f32[32,64] fusion(concatenate, param_2), kind=kCustom, calls=fusion
})"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
LaunchDimensions launch_dimensions{8, WarpSize()};
auto result = indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, launch_dimensions, {16, 16});
TF_EXPECT_OK(result.status());
}
TEST_F(GpuIndexingPerformanceModelTest,
EstimateRunTimeForTiledFusion_ConcatenateIsNotSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
concatenate_fusion {
param_0 = f32[32, 128] parameter(0)
param_1 = f32[64, 128] parameter(1)
ROOT concatenate = f32[96, 128] concatenate(param_0, param_1), dimensions={0}
}
ENTRY main {
param_0 = f32[32, 128] parameter(0)
param_1 = f32[64, 128] parameter(1)
ROOT fusion = f32[96, 128] fusion(param_0, param_1), kind=kCustom, calls=concatenate_fusion
})"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
LaunchDimensions launch_dimensions{96, 128};
auto result = indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, launch_dimensions, {1, 128});
EXPECT_THAT(result, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("SymbolicTileAnalysis failed")));
}
TEST_F(GpuIndexingPerformanceModelTest,
EstimateRunTimeForTiledFusion_RegisterSpill_ReturnsInfinite) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[16,16000] parameter(0)
constant_0 = f32[] constant(0)
reduce_0 = f32[16] reduce(param_0, constant_0), dimensions={1}, to_apply=add
broadcast = f32[16,16000] broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[16,16000] multiply(param_0, broadcast)
}
ENTRY main {
param_0 = f32[16,16000] parameter(0)
ROOT triton_softmax = f32[16,16000] fusion(param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(
auto tiling_result,
indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor));
TF_ASSERT_OK_AND_ASSIGN(auto res1,
indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, {16, 32},
{1, 16000}));
EXPECT_NEAR(absl::ToDoubleMicroseconds(res1.exec_time), 3, 1);
TF_ASSERT_OK_AND_ASSIGN(auto res2,
indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, {8, 32},
{2, 16000}));
EXPECT_TRUE(res2.IsInfinite());
}
TEST_F(GpuIndexingPerformanceModelTest,
EstimateRunTimeForTiledFusion_UsesPaddedTileSizeForMemoryAccessTime) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
triton_softmax_computation {
param_0 = f32[65,65] parameter(0)
param_1 = f32[65,65] parameter(1)
ROOT add = f32[65,65] add(param_0, param_1)
}
ENTRY main {
param_0 = f32[65,65] parameter(0)
param_1 = f32[65,65] parameter(1)
ROOT triton_softmax = f32[65,65] fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(
auto tiling_result,
indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor));
TF_ASSERT_OK_AND_ASSIGN(
auto res, indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, {1, 2 * WarpSize()},
{65, 65}));
constexpr int64_t kParamSizeBytes = 65 * 65 * 4;
constexpr int64_t kPaddedOutputTileSize = 128 * 128;
constexpr int64_t kAddFlops = 3;
EXPECT_EQ(res.bytes_read, 2 * kParamSizeBytes);
EXPECT_EQ(res.flops, kPaddedOutputTileSize * kAddFlops);
}
TEST_F(GpuIndexingPerformanceModelTest,
EstimateRunTimeForTiledFusion_UncoalescedReadsTakeMoreTime) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
triton_softmax_computation {
param_0 = f32[2048,512] parameter(0)
param_1 = f32[2048,512] parameter(1)
ROOT add = f32[2048,512] add(param_0, param_1)
}
ENTRY main {
param_0 = f32[2048,512] parameter(0)
param_1 = f32[2048,512] parameter(1)
ROOT triton_softmax = f32[2048,512] fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(
auto tiling_result,
indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor));
TF_ASSERT_OK_AND_ASSIGN(
auto res_coalesced,
indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, {8192, 2 * WarpSize()},
{1, 128}));
TF_ASSERT_OK_AND_ASSIGN(
auto res_uncoalesced,
indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, {8192, 2 * WarpSize()},
{128, 1}));
constexpr int64_t kParamSizeBytes = 2048 * 512 * 4;
EXPECT_EQ(res_coalesced.bytes_read, 2 * kParamSizeBytes);
EXPECT_EQ(res_uncoalesced.bytes_read, 2 * kParamSizeBytes);
EXPECT_NEAR(absl::ToDoubleMicroseconds(res_coalesced.read_time), 11, 1);
EXPECT_NEAR(absl::ToDoubleMicroseconds(res_uncoalesced.read_time), 175, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
GetLaunchDimensionsForTiledFusion_IsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
triton_softmax_computation {
param_0 = f32[9,9,9] parameter(0)
param_1 = f32[9,9,9] parameter(1)
ROOT multiply = f32[9,9,9] multiply(param_0, param_1)
}
ENTRY main {
param_0 = f32[9,9,9] parameter(0)
param_1 = f32[9,9,9] parameter(1)
ROOT fusion = f32[9,9,9] fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(
*fusion_adaptor, &mlir_context_,
nullptr);
ASSERT_TRUE(std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error));
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
std::get<SymbolicTileAnalysis>(analysis_or_error)
.ComputeTiledHloInstructions({9, 9, 9}));
LaunchDimensions launch_dimensions = GpuPerformanceModelWithIndexingAnalysis::
GetLaunchDimensionsForTiledFusion(tiled_hlo_computation);
EXPECT_EQ(launch_dimensions.num_blocks(), 1);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 4 * WarpSize());
}
TEST_F(GpuIndexingPerformanceModelTest,
NumberOfWarpsDependsOnLargestLiveTileSize) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add = f32[] add(param_0, param_1)
}
fusion_computation {
param_0 = f32[1,4096] parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[1] reduce(param_0, c0), dimensions={1}, to_apply=add
}
ENTRY main {
param_0 = f32[1,4096] parameter(0)
ROOT fusion = f32[1] fusion(param_0), kind=kCustom,
calls=fusion_computation,
backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(
*fusion_adaptor, &mlir_context_,
nullptr);
ASSERT_TRUE(std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error));
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
std::get<SymbolicTileAnalysis>(analysis_or_error)
.ComputeTiledHloInstructions({1}));
LaunchDimensions launch_dimensions = GpuPerformanceModelWithIndexingAnalysis::
GetLaunchDimensionsForTiledFusion(tiled_hlo_computation);
EXPECT_EQ(launch_dimensions.num_blocks(), 1);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 4 * WarpSize());
}
class FlopsPerElementTest : public GpuIndexingPerformanceModelTest {
public:
void CompareFlopsModels(absl::string_view hlo_module_string) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_module_string));
GpuHloCostAnalysis cost_analysis(
GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(),
{},
{},
true},
device_info_);
ASSERT_IS_OK(module->entry_computation()->Accept(&cost_analysis));
auto instr = module->entry_computation()->root_instruction();
int64_t flops_per_element = indexing_cost_model_.FlopsPerElement(instr);
const Shape& output_shape = instr->shape().IsArray()
? instr->shape()
: instr->shape().tuple_shapes(0);
int64_t total_flops =
ShapeUtil::ElementsIn(output_shape) * flops_per_element;
EXPECT_EQ(total_flops, cost_analysis.flop_count(*instr));
}
};
TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Reduce) {
CompareFlopsModels(R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add.0 = f32[] add(param_0, param_1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40] parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32] reduce(param_0.3, constant), dimensions={1}, to_apply=add
}
)");
}
TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_VariadicReduce) {
CompareFlopsModels(R"(
HloModule m
add_multiply {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[] parameter(3)
add = f32[] add(param_0, param_2)
multiply = f32[] multiply(param_1, param_3)
ROOT t = (f32[], f32[]) tuple(add, multiply)
}
ENTRY entry_computation {
param_0 = f32[32,40] parameter(0)
c0 = f32[] constant(0)
ROOT reduce = (f32[32], f32[32]) reduce(param_0, param_0, c0, c0), dimensions={1}, to_apply=add_multiply
}
)");
}
TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Elementwise_Cosine) {
CompareFlopsModels(R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[32] parameter(0)
ROOT cosine = f32[32] cosine(param_0)
}
)");
}
TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Elementwise_Clamp) {
CompareFlopsModels(R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[32] parameter(0)
param_1 = f32[32] parameter(1)
param_2 = f32[32] parameter(2)
ROOT clamp = clamp(param_0, param_1, param_2)
}
)");
}
TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Gather) {
CompareFlopsModels(R"(
HloModule module
entry {
operand = f32[33, 76, 70] parameter(0)
indices = s32[1806, 2] parameter(1)
ROOT gather = f32[1806, 7, 8, 4] gather(operand, indices),
offset_dims={1,2,3}, collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={7,8,4}
})");
}
TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_ReduceWindow) {
CompareFlopsModels(R"(
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param_0 = f32[13,12,8,15] parameter(0)
c0 = f32[] constant(0)
ROOT reduce-window = f32[13,3,8,15] reduce-window(param_0, c0), window={size=1x1x7x1 stride=1x4x1x1 pad=0_0x0_0x3_3x0_0}, to_apply=add
})");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6dca675b-a2d7-4768-a4d6-9fbb377b81d0 | cpp | google/quiche | quic_coalesced_packet | quiche/quic/core/quic_coalesced_packet.cc | quiche/quic/core/quic_coalesced_packet_test.cc | #include "quiche/quic/core/quic_coalesced_packet.h"
#include <cstring>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
namespace quic {
QuicCoalescedPacket::QuicCoalescedPacket()
: length_(0), max_packet_length_(0), ecn_codepoint_(ECN_NOT_ECT) {}
QuicCoalescedPacket::~QuicCoalescedPacket() { Clear(); }
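// Appends |packet| to the coalesced packet if possible. Coalescing fails when
// the self/peer address, max packet length, or ECN codepoint differs from the
// packets already present, when a packet of the same encryption level was
// already coalesced, or when the combined length would exceed
// max_packet_length_. An ENCRYPTION_INITIAL packet is kept as a deep copy in
// initial_packet_ instead of as a raw encrypted buffer.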
bool QuicCoalescedPacket::MaybeCoalescePacket(
const SerializedPacket& packet, const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address,
quiche::QuicheBufferAllocator* allocator,
QuicPacketLength current_max_packet_length,
QuicEcnCodepoint ecn_codepoint) {
if (packet.encrypted_length == 0) {
QUIC_BUG(quic_bug_10611_1) << "Trying to coalesce an empty packet";
return true;
}
if (length_ == 0) {
#ifndef NDEBUG
for (const auto& buffer : encrypted_buffers_) {
QUICHE_DCHECK(buffer.empty());
}
#endif
QUICHE_DCHECK(initial_packet_ == nullptr);
max_packet_length_ = current_max_packet_length;
self_address_ = self_address;
peer_address_ = peer_address;
} else {
if (self_address_ != self_address || peer_address_ != peer_address) {
QUIC_DLOG(INFO)
<< "Cannot coalesce packet because self/peer address changed";
return false;
}
if (max_packet_length_ != current_max_packet_length) {
QUIC_BUG(quic_bug_10611_2)
<< "Max packet length changes in the middle of the write path";
return false;
}
if (ContainsPacketOfEncryptionLevel(packet.encryption_level)) {
return false;
}
if (ecn_codepoint != ecn_codepoint_) {
return false;
}
}
if (length_ + packet.encrypted_length > max_packet_length_) {
return false;
}
QUIC_DVLOG(1) << "Successfully coalesced packet: encryption_level: "
<< packet.encryption_level
<< ", encrypted_length: " << packet.encrypted_length
<< ", current length: " << length_
<< ", max_packet_length: " << max_packet_length_;
if (length_ > 0) {
QUIC_CODE_COUNT(QUIC_SUCCESSFULLY_COALESCED_MULTIPLE_PACKETS);
}
ecn_codepoint_ = ecn_codepoint;
length_ += packet.encrypted_length;
transmission_types_[packet.encryption_level] = packet.transmission_type;
if (packet.encryption_level == ENCRYPTION_INITIAL) {
initial_packet_ = absl::WrapUnique<SerializedPacket>(
CopySerializedPacket(packet, allocator, false));
return true;
}
encrypted_buffers_[packet.encryption_level] =
std::string(packet.encrypted_buffer, packet.encrypted_length);
return true;
}
void QuicCoalescedPacket::Clear() {
self_address_ = QuicSocketAddress();
peer_address_ = QuicSocketAddress();
length_ = 0;
max_packet_length_ = 0;
for (auto& packet : encrypted_buffers_) {
packet.clear();
}
for (size_t i = ENCRYPTION_INITIAL; i < NUM_ENCRYPTION_LEVELS; ++i) {
transmission_types_[i] = NOT_RETRANSMISSION;
}
initial_packet_ = nullptr;
}
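// Removes the coalesced ENCRYPTION_INITIAL packet, if any, subtracting its
// length; clears the whole coalesced packet when nothing else remains.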
void QuicCoalescedPacket::NeuterInitialPacket() {
if (initial_packet_ == nullptr) {
return;
}
if (length_ < initial_packet_->encrypted_length) {
QUIC_BUG(quic_bug_10611_3)
<< "length_: " << length_ << ", is less than initial packet length: "
<< initial_packet_->encrypted_length;
Clear();
return;
}
length_ -= initial_packet_->encrypted_length;
if (length_ == 0) {
Clear();
return;
}
transmission_types_[ENCRYPTION_INITIAL] = NOT_RETRANSMISSION;
initial_packet_ = nullptr;
}
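// Copies the encrypted buffers of the coalesced packets (excluding the
// INITIAL packet, which is stored separately) into |buffer| in encryption
// level order. Returns false if |buffer_len| is too small for any buffer.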
bool QuicCoalescedPacket::CopyEncryptedBuffers(char* buffer, size_t buffer_len,
size_t* length_copied) const {
*length_copied = 0;
for (const auto& packet : encrypted_buffers_) {
if (packet.empty()) {
continue;
}
if (packet.length() > buffer_len) {
return false;
}
memcpy(buffer, packet.data(), packet.length());
buffer += packet.length();
buffer_len -= packet.length();
*length_copied += packet.length();
}
return true;
}
bool QuicCoalescedPacket::ContainsPacketOfEncryptionLevel(
EncryptionLevel level) const {
return !encrypted_buffers_[level].empty() ||
(level == ENCRYPTION_INITIAL && initial_packet_ != nullptr);
}
TransmissionType QuicCoalescedPacket::TransmissionTypeOfPacket(
EncryptionLevel level) const {
if (!ContainsPacketOfEncryptionLevel(level)) {
QUIC_BUG(quic_bug_10611_4)
<< "Coalesced packet does not contain packet of encryption level: "
<< EncryptionLevelToString(level);
return NOT_RETRANSMISSION;
}
return transmission_types_[level];
}
size_t QuicCoalescedPacket::NumberOfPackets() const {
size_t num_of_packets = 0;
for (int8_t i = ENCRYPTION_INITIAL; i < NUM_ENCRYPTION_LEVELS; ++i) {
if (ContainsPacketOfEncryptionLevel(static_cast<EncryptionLevel>(i))) {
++num_of_packets;
}
}
return num_of_packets;
}
std::string QuicCoalescedPacket::ToString(size_t serialized_length) const {
std::string info = absl::StrCat(
"total_length: ", serialized_length,
" padding_size: ", serialized_length - length_, " packets: {");
bool first_packet = true;
for (int8_t i = ENCRYPTION_INITIAL; i < NUM_ENCRYPTION_LEVELS; ++i) {
if (ContainsPacketOfEncryptionLevel(static_cast<EncryptionLevel>(i))) {
absl::StrAppend(&info, first_packet ? "" : ", ",
EncryptionLevelToString(static_cast<EncryptionLevel>(i)));
first_packet = false;
}
}
absl::StrAppend(&info, "}");
return info;
}
std::vector<size_t> QuicCoalescedPacket::packet_lengths() const {
std::vector<size_t> lengths;
for (const auto& packet : encrypted_buffers_) {
if (lengths.empty()) {
lengths.push_back(
initial_packet_ == nullptr ? 0 : initial_packet_->encrypted_length);
} else {
lengths.push_back(packet.length());
}
}
return lengths;
}
} | #include "quiche/quic/core/quic_coalesced_packet.h"
#include <string>
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
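// Exercises coalescing, encrypted-buffer copying, initial-packet neutering,
// and the ECN-mismatch rejection path of QuicCoalescedPacket.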
TEST(QuicCoalescedPacketTest, MaybeCoalescePacket) {
QuicCoalescedPacket coalesced;
EXPECT_EQ("total_length: 0 padding_size: 0 packets: {}",
coalesced.ToString(0));
quiche::SimpleBufferAllocator allocator;
EXPECT_EQ(0u, coalesced.length());
EXPECT_EQ(0u, coalesced.NumberOfPackets());
char buffer[1000];
QuicSocketAddress self_address(QuicIpAddress::Loopback4(), 1);
QuicSocketAddress peer_address(QuicIpAddress::Loopback4(), 2);
SerializedPacket packet1(QuicPacketNumber(1), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
packet1.transmission_type = PTO_RETRANSMISSION;
QuicAckFrame ack_frame(InitAckFrame(1));
packet1.nonretransmittable_frames.push_back(QuicFrame(&ack_frame));
packet1.retransmittable_frames.push_back(
QuicFrame(QuicStreamFrame(1, true, 0, 100)));
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet1, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(PTO_RETRANSMISSION,
coalesced.TransmissionTypeOfPacket(ENCRYPTION_INITIAL));
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(500u, coalesced.length());
EXPECT_EQ(1u, coalesced.NumberOfPackets());
EXPECT_EQ(
"total_length: 1500 padding_size: 1000 packets: {ENCRYPTION_INITIAL}",
coalesced.ToString(1500));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
SerializedPacket packet2(QuicPacketNumber(2), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
EXPECT_FALSE(coalesced.MaybeCoalescePacket(
packet2, self_address, peer_address, &allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
SerializedPacket packet3(QuicPacketNumber(3), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
packet3.nonretransmittable_frames.push_back(QuicFrame(QuicPaddingFrame(100)));
packet3.encryption_level = ENCRYPTION_ZERO_RTT;
packet3.transmission_type = LOSS_RETRANSMISSION;
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet3, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(1000u, coalesced.length());
EXPECT_EQ(2u, coalesced.NumberOfPackets());
EXPECT_EQ(LOSS_RETRANSMISSION,
coalesced.TransmissionTypeOfPacket(ENCRYPTION_ZERO_RTT));
EXPECT_EQ(
"total_length: 1500 padding_size: 500 packets: {ENCRYPTION_INITIAL, "
"ENCRYPTION_ZERO_RTT}",
coalesced.ToString(1500));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
SerializedPacket packet4(QuicPacketNumber(4), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
packet4.encryption_level = ENCRYPTION_FORWARD_SECURE;
EXPECT_FALSE(coalesced.MaybeCoalescePacket(
packet4, QuicSocketAddress(QuicIpAddress::Loopback4(), 3), peer_address,
&allocator, 1500, ECN_NOT_ECT));
SerializedPacket packet5(QuicPacketNumber(5), PACKET_4BYTE_PACKET_NUMBER,
buffer, 501, false, false);
packet5.encryption_level = ENCRYPTION_FORWARD_SECURE;
EXPECT_FALSE(coalesced.MaybeCoalescePacket(
packet5, self_address, peer_address, &allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(1000u, coalesced.length());
EXPECT_EQ(2u, coalesced.NumberOfPackets());
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
SerializedPacket packet6(QuicPacketNumber(6), PACKET_4BYTE_PACKET_NUMBER,
buffer, 100, false, false);
packet6.encryption_level = ENCRYPTION_FORWARD_SECURE;
EXPECT_QUIC_BUG(
coalesced.MaybeCoalescePacket(packet6, self_address, peer_address,
&allocator, 1000, ECN_NOT_ECT),
"Max packet length changes in the middle of the write path");
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(1000u, coalesced.length());
EXPECT_EQ(2u, coalesced.NumberOfPackets());
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
}
TEST(QuicCoalescedPacketTest, CopyEncryptedBuffers) {
QuicCoalescedPacket coalesced;
quiche::SimpleBufferAllocator allocator;
QuicSocketAddress self_address(QuicIpAddress::Loopback4(), 1);
QuicSocketAddress peer_address(QuicIpAddress::Loopback4(), 2);
std::string buffer(500, 'a');
std::string buffer2(500, 'b');
SerializedPacket packet1(QuicPacketNumber(1), PACKET_4BYTE_PACKET_NUMBER,
buffer.data(), 500,
false, false);
packet1.encryption_level = ENCRYPTION_ZERO_RTT;
SerializedPacket packet2(QuicPacketNumber(2), PACKET_4BYTE_PACKET_NUMBER,
buffer2.data(), 500,
false, false);
packet2.encryption_level = ENCRYPTION_FORWARD_SECURE;
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet1, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet2, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(1000u, coalesced.length());
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
char copy_buffer[1000];
size_t length_copied = 0;
EXPECT_FALSE(
coalesced.CopyEncryptedBuffers(copy_buffer, 900, &length_copied));
ASSERT_TRUE(
coalesced.CopyEncryptedBuffers(copy_buffer, 1000, &length_copied));
EXPECT_EQ(1000u, length_copied);
char expected[1000];
memset(expected, 'a', 500);
memset(expected + 500, 'b', 500);
quiche::test::CompareCharArraysWithHexError("copied buffers", copy_buffer,
length_copied, expected, 1000);
}
TEST(QuicCoalescedPacketTest, NeuterInitialPacket) {
QuicCoalescedPacket coalesced;
EXPECT_EQ("total_length: 0 padding_size: 0 packets: {}",
coalesced.ToString(0));
coalesced.NeuterInitialPacket();
EXPECT_EQ("total_length: 0 padding_size: 0 packets: {}",
coalesced.ToString(0));
quiche::SimpleBufferAllocator allocator;
EXPECT_EQ(0u, coalesced.length());
char buffer[1000];
QuicSocketAddress self_address(QuicIpAddress::Loopback4(), 1);
QuicSocketAddress peer_address(QuicIpAddress::Loopback4(), 2);
SerializedPacket packet1(QuicPacketNumber(1), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
packet1.transmission_type = PTO_RETRANSMISSION;
QuicAckFrame ack_frame(InitAckFrame(1));
packet1.nonretransmittable_frames.push_back(QuicFrame(&ack_frame));
packet1.retransmittable_frames.push_back(
QuicFrame(QuicStreamFrame(1, true, 0, 100)));
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet1, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(PTO_RETRANSMISSION,
coalesced.TransmissionTypeOfPacket(ENCRYPTION_INITIAL));
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(500u, coalesced.length());
EXPECT_EQ(
"total_length: 1500 padding_size: 1000 packets: {ENCRYPTION_INITIAL}",
coalesced.ToString(1500));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
coalesced.NeuterInitialPacket();
EXPECT_EQ(0u, coalesced.max_packet_length());
EXPECT_EQ(0u, coalesced.length());
EXPECT_EQ("total_length: 0 padding_size: 0 packets: {}",
coalesced.ToString(0));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet1, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
SerializedPacket packet2(QuicPacketNumber(3), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
packet2.nonretransmittable_frames.push_back(QuicFrame(QuicPaddingFrame(100)));
packet2.encryption_level = ENCRYPTION_ZERO_RTT;
packet2.transmission_type = LOSS_RETRANSMISSION;
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet2, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(1000u, coalesced.length());
EXPECT_EQ(LOSS_RETRANSMISSION,
coalesced.TransmissionTypeOfPacket(ENCRYPTION_ZERO_RTT));
EXPECT_EQ(
"total_length: 1500 padding_size: 500 packets: {ENCRYPTION_INITIAL, "
"ENCRYPTION_ZERO_RTT}",
coalesced.ToString(1500));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
coalesced.NeuterInitialPacket();
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(500u, coalesced.length());
EXPECT_EQ(
"total_length: 1500 padding_size: 1000 packets: {ENCRYPTION_ZERO_RTT}",
coalesced.ToString(1500));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
SerializedPacket packet3(QuicPacketNumber(5), PACKET_4BYTE_PACKET_NUMBER,
buffer, 501, false, false);
packet3.encryption_level = ENCRYPTION_FORWARD_SECURE;
EXPECT_TRUE(coalesced.MaybeCoalescePacket(packet3, self_address, peer_address,
&allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(1001u, coalesced.length());
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
coalesced.NeuterInitialPacket();
EXPECT_EQ(1500u, coalesced.max_packet_length());
EXPECT_EQ(1001u, coalesced.length());
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_NOT_ECT);
}
TEST(QuicCoalescedPacketTest, DoNotCoalesceDifferentEcn) {
QuicCoalescedPacket coalesced;
EXPECT_EQ("total_length: 0 padding_size: 0 packets: {}",
coalesced.ToString(0));
quiche::SimpleBufferAllocator allocator;
EXPECT_EQ(0u, coalesced.length());
EXPECT_EQ(0u, coalesced.NumberOfPackets());
char buffer[1000];
QuicSocketAddress self_address(QuicIpAddress::Loopback4(), 1);
QuicSocketAddress peer_address(QuicIpAddress::Loopback4(), 2);
SerializedPacket packet1(QuicPacketNumber(1), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
packet1.transmission_type = PTO_RETRANSMISSION;
QuicAckFrame ack_frame(InitAckFrame(1));
packet1.nonretransmittable_frames.push_back(QuicFrame(&ack_frame));
packet1.retransmittable_frames.push_back(
QuicFrame(QuicStreamFrame(1, true, 0, 100)));
ASSERT_TRUE(coalesced.MaybeCoalescePacket(packet1, self_address, peer_address,
&allocator, 1500, ECN_ECT1));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_ECT1);
SerializedPacket packet2(QuicPacketNumber(2), PACKET_4BYTE_PACKET_NUMBER,
buffer, 500, false, false);
packet2.nonretransmittable_frames.push_back(QuicFrame(QuicPaddingFrame(100)));
packet2.encryption_level = ENCRYPTION_ZERO_RTT;
packet2.transmission_type = LOSS_RETRANSMISSION;
EXPECT_FALSE(coalesced.MaybeCoalescePacket(
packet2, self_address, peer_address, &allocator, 1500, ECN_NOT_ECT));
EXPECT_EQ(coalesced.ecn_codepoint(), ECN_ECT1);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_coalesced_packet.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_coalesced_packet_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
07d7b812-5975-4ca6-ad06-b484fe868faf | cpp | google/tensorstore | gcs_key_value_store | tensorstore/kvstore/gcs_http/gcs_key_value_store.cc | tensorstore/kvstore/gcs_http/gcs_key_value_store_test.cc | #include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/concurrency_resource.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/google_auth_provider.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/internal/retries_context_resource.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/gcs/gcs_resource.h"
#include "tensorstore/kvstore/gcs/validate.h"
#include "tensorstore/kvstore/gcs_http/gcs_resource.h"
#include "tensorstore/kvstore/gcs_http/object_metadata.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/generic_coalescing_batch_util.h"
#include "tensorstore/kvstore/http/byte_range_util.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/util/garbage_collection/std_optional.h"
ABSL_FLAG(std::optional<std::string>, tensorstore_gcs_http_url, std::nullopt,
          "Url used for http access to google cloud storage. "
          "Overrides TENSORSTORE_GCS_HTTP_URL.");
ABSL_FLAG(std::optional<std::string>, tensorstore_gcs_http_version,
          std::nullopt,
          "Http version used for access to google cloud storage. "
          "Overrides TENSORSTORE_GCS_HTTP_VERSION.");
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::NoRateLimiter;
using ::tensorstore::internal::RateLimiter;
using ::tensorstore::internal::RateLimiterNode;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_kvstore_gcs_http::GcsConcurrencyResource;
using ::tensorstore::internal_kvstore_gcs_http::GcsRateLimiterResource;
using ::tensorstore::internal_kvstore_gcs_http::ObjectMetadata;
using ::tensorstore::internal_kvstore_gcs_http::ParseObjectMetadata;
using ::tensorstore::internal_storage_gcs::GcsHttpResponseToStatus;
using ::tensorstore::internal_storage_gcs::GcsRequestRetries;
using ::tensorstore::internal_storage_gcs::GcsUserProjectResource;
using ::tensorstore::internal_storage_gcs::IsRetriable;
using ::tensorstore::internal_storage_gcs::IsValidBucketName;
using ::tensorstore::internal_storage_gcs::IsValidObjectName;
using ::tensorstore::internal_storage_gcs::IsValidStorageGeneration;
using ::tensorstore::kvstore::Key;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListOptions;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::SupportedFeatures;
namespace {
static constexpr char kUriScheme[] = "gs";
}
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
struct GcsMetrics : public internal_kvstore::CommonMetrics {
internal_metrics::Counter<int64_t>& retries;
};
auto gcs_metrics = []() -> GcsMetrics {
return {
TENSORSTORE_KVSTORE_COMMON_METRICS(gcs),
TENSORSTORE_KVSTORE_COUNTER_IMPL(
gcs, retries, "count of all retried requests (read/write/delete)")};
}();
ABSL_CONST_INIT internal_log::VerboseFlag gcs_http_logging("gcs_http");
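// Returns the GCS endpoint, honoring --tensorstore_gcs_http_url and the
// TENSORSTORE_GCS_HTTP_URL environment variable.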
std::string GetGcsBaseUrl() {
return GetFlagOrEnvValue(FLAGS_tensorstore_gcs_http_url,
"TENSORSTORE_GCS_HTTP_URL")
.value_or("https:
}
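// Maps --tensorstore_gcs_http_version / TENSORSTORE_GCS_HTTP_VERSION to an
// IssueRequestOptions::HttpVersion; computed once and cached.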
IssueRequestOptions::HttpVersion GetHttpVersion() {
using HttpVersion = IssueRequestOptions::HttpVersion;
static auto http_version = []() -> HttpVersion {
auto version = GetFlagOrEnvValue(FLAGS_tensorstore_gcs_http_version,
"TENSORSTORE_GCS_HTTP_VERSION");
if (!version) {
ABSL_LOG_IF(INFO, gcs_http_logging)
<< "--tensorstore_gcs_http_version unset";
return HttpVersion::kDefault;
}
ABSL_LOG_IF(INFO, gcs_http_logging)
<< "--tensorstore_gcs_http_version=" << *version;
if (*version == "1" || *version == "1.1") {
return HttpVersion::kHttp1;
}
if (*version == "2" || *version == "2.0") {
return HttpVersion::kHttp2PriorKnowledge;
}
return HttpVersion::kHttp2TLS;
}();
return http_version;
}
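// The next two helpers append an ifGenerationMatch/ifGenerationNotMatch or
// userProject query parameter to |url|, returning whether a parameter was
// added so callers can track the '?' vs '&' separator.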
bool AddGenerationParam(std::string* url, const bool has_query,
std::string_view param_name,
const StorageGeneration& gen) {
if (StorageGeneration::IsUnknown(gen)) {
return false;
} else {
absl::StrAppend(url, (has_query ? "&" : "?"), param_name, "=",
StorageGeneration::ToUint64(gen));
return true;
}
}
bool AddUserProjectParam(std::string* url, const bool has_query,
std::string_view encoded_user_project) {
if (!encoded_user_project.empty()) {
absl::StrAppend(url, (has_query ? "&" : "?"),
"userProject=", encoded_user_project);
return true;
}
return false;
}
std::string BucketResourceRoot(std::string_view bucket) {
const char kVersion[] = "v1";
return absl::StrCat(GetGcsBaseUrl(), "/storage/", kVersion, "/b/", bucket);
}
std::string BucketUploadRoot(std::string_view bucket) {
const char kVersion[] = "v1";
return absl::StrCat(GetGcsBaseUrl(), "/upload/storage/", kVersion, "/b/",
bucket);
}
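// Spec data for the "gcs" driver: the bucket name plus context resources
// controlling concurrency, rate limiting, user-project billing, and retries.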
struct GcsKeyValueStoreSpecData {
std::string bucket;
Context::Resource<GcsConcurrencyResource> request_concurrency;
std::optional<Context::Resource<GcsRateLimiterResource>> rate_limiter;
Context::Resource<GcsUserProjectResource> user_project;
Context::Resource<GcsRequestRetries> retries;
Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(x.bucket, x.request_concurrency, x.rate_limiter, x.user_project,
x.retries, x.data_copy_concurrency);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member("bucket",
jb::Projection<&GcsKeyValueStoreSpecData::bucket>(jb::Validate(
[](const auto& options, const std::string* x) {
if (!IsValidBucketName(*x)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid GCS bucket name: ", QuoteString(*x)));
}
return absl::OkStatus();
}))),
jb::Member(
GcsConcurrencyResource::id,
jb::Projection<&GcsKeyValueStoreSpecData::request_concurrency>()),
jb::Member(GcsRateLimiterResource::id,
jb::Projection<&GcsKeyValueStoreSpecData::rate_limiter>()),
jb::Member(GcsUserProjectResource::id,
jb::Projection<&GcsKeyValueStoreSpecData::user_project>()),
jb::Member(GcsRequestRetries::id,
jb::Projection<&GcsKeyValueStoreSpecData::retries>()),
jb::Member(DataCopyConcurrencyResource::id,
jb::Projection<
&GcsKeyValueStoreSpecData::data_copy_concurrency>())
);
};
std::string GetGcsUrl(std::string_view bucket, std::string_view path) {
  return absl::StrCat(kUriScheme, "://", bucket, "/",
                      internal::PercentEncodeUriPath(path));
}
class GcsKeyValueStoreSpec
: public internal_kvstore::RegisteredDriverSpec<GcsKeyValueStoreSpec,
GcsKeyValueStoreSpecData> {
public:
static constexpr char id[] = "gcs";
absl::Status NormalizeSpec(std::string& path) override {
if (!path.empty() && !IsValidObjectName(path)) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid GCS path: ", QuoteString(path)));
}
return absl::OkStatus();
}
Future<kvstore::DriverPtr> DoOpen() const override;
Result<std::string> ToUrl(std::string_view path) const override {
return GetGcsUrl(data_.bucket, path);
}
};
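// KvStore driver backed by the GCS JSON/media HTTP API. Requests flow through
// per-operation rate limiters and a shared admission queue, and retriable
// failures are retried with exponential backoff via BackoffForAttemptAsync.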
class GcsKeyValueStore
: public internal_kvstore::RegisteredDriver<GcsKeyValueStore,
GcsKeyValueStoreSpec> {
public:
const std::string& resource_root() const { return resource_root_; }
const std::string& upload_root() const { return upload_root_; }
const std::string& encoded_user_project() const {
return encoded_user_project_;
}
internal_kvstore_batch::CoalescingOptions GetBatchReadCoalescingOptions()
const {
return internal_kvstore_batch::kDefaultRemoteStorageCoalescingOptions;
}
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<ReadResult> ReadImpl(Key&& key, ReadOptions&& options);
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
Future<const void> DeleteRange(KeyRange range) override;
Result<std::optional<std::string>> GetAuthHeader() {
absl::MutexLock lock(&auth_provider_mutex_);
if (!auth_provider_) {
auto result = tensorstore::internal_oauth2::GetSharedGoogleAuthProvider();
if (!result.ok() && absl::IsNotFound(result.status())) {
auth_provider_ = nullptr;
} else {
TENSORSTORE_RETURN_IF_ERROR(result);
auth_provider_ = *std::move(result);
}
}
if (!*auth_provider_) return std::nullopt;
auto auth_header_result = (*auth_provider_)->GetAuthHeader();
if (!auth_header_result.ok() &&
absl::IsNotFound(auth_header_result.status())) {
return std::nullopt;
}
return auth_header_result;
}
const Executor& executor() const {
return spec_.data_copy_concurrency->executor;
}
RateLimiter& read_rate_limiter() {
if (spec_.rate_limiter.has_value()) {
return *(spec_.rate_limiter.value()->read_limiter);
}
return no_rate_limiter_;
}
RateLimiter& write_rate_limiter() {
if (spec_.rate_limiter.has_value()) {
return *(spec_.rate_limiter.value()->write_limiter);
}
return no_rate_limiter_;
}
RateLimiter& admission_queue() { return *spec_.request_concurrency->queue; }
absl::Status GetBoundSpecData(SpecData& spec) const {
spec = spec_;
return absl::OkStatus();
}
std::string DescribeKey(std::string_view key) override {
return GetGcsUrl(spec_.bucket, key);
}
SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
SupportedFeatures::kAtomicWriteWithoutOverwrite;
}
template <typename Task>
absl::Status BackoffForAttemptAsync(
absl::Status status, int attempt, Task* task,
SourceLocation loc = ::tensorstore::SourceLocation::current()) {
assert(task != nullptr);
auto delay = spec_.retries->BackoffForAttempt(attempt);
if (!delay) {
return MaybeAnnotateStatus(std::move(status),
absl::StrFormat("All %d retry attempts failed",
spec_.retries->max_retries),
absl::StatusCode::kAborted, loc);
}
gcs_metrics.retries.Increment();
ScheduleAt(absl::Now() + *delay,
WithExecutor(executor(), [task = IntrusivePtr<Task>(task)] {
task->Retry();
}));
return absl::OkStatus();
}
SpecData spec_;
std::string resource_root_;
std::string upload_root_;
std::string encoded_user_project_;
NoRateLimiter no_rate_limiter_;
std::shared_ptr<HttpTransport> transport_;
absl::Mutex auth_provider_mutex_;
std::optional<std::shared_ptr<internal_oauth2::AuthProvider>> auth_provider_;
};
Future<kvstore::DriverPtr> GcsKeyValueStoreSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<GcsKeyValueStore>();
driver->spec_ = data_;
driver->resource_root_ = BucketResourceRoot(data_.bucket);
driver->upload_root_ = BucketUploadRoot(data_.bucket);
driver->transport_ = internal_http::GetDefaultHttpTransport();
if (data_.rate_limiter.has_value()) {
ABSL_LOG_IF(INFO, gcs_http_logging)
<< "Using experimental_gcs_rate_limiter";
}
if (const auto& project_id = data_.user_project->project_id) {
driver->encoded_user_project_ =
internal::PercentEncodeUriComponent(*project_id);
}
return driver;
}
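// Appends a random "tensorstore=" query parameter so intermediate HTTP caches
// do not serve stale object data.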
void AddUniqueQueryParameterToDisableCaching(std::string& url) {
struct RandomState {
absl::Mutex mutex;
absl::BitGen gen ABSL_GUARDED_BY(mutex);
};
static RandomState random_state;
uint64_t uuid[2];
absl::MutexLock lock(&random_state.mutex);
for (auto& x : uuid) {
x = absl::Uniform<uint64_t>(random_state.gen);
}
absl::StrAppend(&url, "&tensorstore=", absl::Hex(uuid[0], absl::kZeroPad16),
absl::Hex(uuid[1], absl::kZeroPad16));
}
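// Implements a single GCS object read. The task passes through the read rate
// limiter and admission queue, issues a GET (metadata-only via alt=json when
// the byte range is empty, alt=media otherwise), and retries retriable
// failures with backoff. 404 maps to a missing value; 304/412 map to
// unspecified results carrying the request's generation conditions.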
struct ReadTask : public RateLimiterNode,
public internal::AtomicReferenceCount<ReadTask> {
IntrusivePtr<GcsKeyValueStore> owner;
std::string resource;
kvstore::ReadOptions options;
Promise<kvstore::ReadResult> promise;
int attempt_ = 0;
absl::Time start_time_;
ReadTask(IntrusivePtr<GcsKeyValueStore> owner, std::string resource,
kvstore::ReadOptions options, Promise<kvstore::ReadResult> promise)
: owner(std::move(owner)),
resource(std::move(resource)),
options(std::move(options)),
promise(std::move(promise)) {}
~ReadTask() { owner->admission_queue().Finish(this); }
static void Start(void* task) {
auto* self = reinterpret_cast<ReadTask*>(task);
self->owner->read_rate_limiter().Finish(self);
self->owner->admission_queue().Admit(self, &ReadTask::Admit);
}
static void Admit(void* task) {
auto* self = reinterpret_cast<ReadTask*>(task);
self->owner->executor()(
[state = IntrusivePtr<ReadTask>(self, internal::adopt_object_ref)] {
state->Retry();
});
}
void Retry() {
if (!promise.result_needed()) {
return;
}
std::string media_url = absl::StrCat(
resource, options.byte_range.size() == 0 ? "?alt=json" : "?alt=media");
AddGenerationParam(&media_url, true, "ifGenerationNotMatch",
options.generation_conditions.if_not_equal);
AddGenerationParam(&media_url, true, "ifGenerationMatch",
options.generation_conditions.if_equal);
AddUserProjectParam(&media_url, true, owner->encoded_user_project());
AddUniqueQueryParameterToDisableCaching(media_url);
auto maybe_auth_header = owner->GetAuthHeader();
if (!maybe_auth_header.ok()) {
promise.SetResult(maybe_auth_header.status());
return;
}
HttpRequestBuilder request_builder("GET", media_url);
if (maybe_auth_header.value().has_value()) {
request_builder.AddHeader(*maybe_auth_header.value());
}
if (options.byte_range.size() != 0) {
request_builder.MaybeAddRangeHeader(options.byte_range);
}
auto request = request_builder.EnableAcceptEncoding().BuildRequest();
start_time_ = absl::Now();
ABSL_LOG_IF(INFO, gcs_http_logging) << "ReadTask: " << request;
auto future = owner->transport_->IssueRequest(
request, IssueRequestOptions().SetHttpVersion(GetHttpVersion()));
future.ExecuteWhenReady([self = IntrusivePtr<ReadTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
});
}
void OnResponse(const Result<HttpResponse>& response) {
if (!promise.result_needed()) {
return;
}
ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
<< "ReadTask " << *response;
bool is_retryable = IsRetriable(response.status());
absl::Status status = [&]() -> absl::Status {
if (!response.ok()) return response.status();
switch (response.value().status_code) {
case 412:
case 404:
case 304:
return absl::OkStatus();
}
return GcsHttpResponseToStatus(response.value(), is_retryable);
}();
if (!status.ok() && is_retryable) {
status =
owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (!status.ok()) {
promise.SetResult(status);
} else {
promise.SetResult(FinishResponse(response.value()));
}
}
Result<kvstore::ReadResult> FinishResponse(const HttpResponse& httpresponse) {
gcs_metrics.bytes_read.IncrementBy(httpresponse.payload.size());
auto latency = absl::Now() - start_time_;
gcs_metrics.read_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
switch (httpresponse.status_code) {
case 204:
case 404:
return kvstore::ReadResult::Missing(start_time_);
case 412:
return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
StorageGeneration::Unknown(), start_time_});
case 304:
return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
options.generation_conditions.if_not_equal, start_time_});
}
absl::Cord value;
ObjectMetadata metadata;
if (options.byte_range.size() != 0) {
ByteRange byte_range;
int64_t total_size;
TENSORSTORE_RETURN_IF_ERROR(internal_http::ValidateResponseByteRange(
httpresponse, options.byte_range, value, byte_range, total_size));
SetObjectMetadataFromHeaders(httpresponse.headers, &metadata);
} else {
absl::Cord cord = httpresponse.payload;
TENSORSTORE_ASSIGN_OR_RETURN(metadata,
ParseObjectMetadata(cord.Flatten()));
}
auto generation = StorageGeneration::FromUint64(metadata.generation);
return kvstore::ReadResult::Value(
std::move(value),
TimestampedStorageGeneration{std::move(generation), start_time_});
}
};
Future<kvstore::ReadResult> GcsKeyValueStore::Read(Key key,
ReadOptions options) {
gcs_metrics.read.Increment();
if (!IsValidObjectName(key)) {
return absl::InvalidArgumentError("Invalid GCS object name");
}
if (!IsValidStorageGeneration(options.generation_conditions.if_equal) ||
!IsValidStorageGeneration(options.generation_conditions.if_not_equal)) {
return absl::InvalidArgumentError("Malformed StorageGeneration");
}
return internal_kvstore_batch::HandleBatchRequestByGenericByteRangeCoalescing(
*this, std::move(key), std::move(options));
}
Future<kvstore::ReadResult> GcsKeyValueStore::ReadImpl(Key&& key,
ReadOptions&& options) {
gcs_metrics.batch_read.Increment();
auto encoded_object_name = internal::PercentEncodeUriComponent(key);
std::string resource = tensorstore::internal::JoinPath(resource_root_, "/o/",
encoded_object_name);
auto op = PromiseFuturePair<ReadResult>::Make();
auto state = internal::MakeIntrusivePtr<ReadTask>(
internal::IntrusivePtr<GcsKeyValueStore>(this), std::move(resource),
std::move(options), std::move(op.promise));
intrusive_ptr_increment(state.get());
read_rate_limiter().Admit(state.get(), &ReadTask::Start);
return std::move(op.future);
}
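// Implements a conditional GCS object upload via uploadType=media. A 412
// (precondition failed) response yields StorageGeneration::Unknown rather
// than an error, as does a 404 when an if_equal generation was specified.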
struct WriteTask : public RateLimiterNode,
public internal::AtomicReferenceCount<WriteTask> {
IntrusivePtr<GcsKeyValueStore> owner;
std::string encoded_object_name;
absl::Cord value;
kvstore::WriteOptions options;
Promise<TimestampedStorageGeneration> promise;
int attempt_ = 0;
absl::Time start_time_;
WriteTask(IntrusivePtr<GcsKeyValueStore> owner,
std::string encoded_object_name, absl::Cord value,
kvstore::WriteOptions options,
Promise<TimestampedStorageGeneration> promise)
: owner(std::move(owner)),
encoded_object_name(std::move(encoded_object_name)),
value(std::move(value)),
options(std::move(options)),
promise(std::move(promise)) {}
~WriteTask() { owner->admission_queue().Finish(this); }
static void Start(void* task) {
auto* self = reinterpret_cast<WriteTask*>(task);
self->owner->write_rate_limiter().Finish(self);
self->owner->admission_queue().Admit(self, &WriteTask::Admit);
}
static void Admit(void* task) {
auto* self = reinterpret_cast<WriteTask*>(task);
self->owner->executor()(
[state = IntrusivePtr<WriteTask>(self, internal::adopt_object_ref)] {
state->Retry();
});
}
void Retry() {
if (!promise.result_needed()) {
return;
}
std::string upload_url =
absl::StrCat(owner->upload_root(), "/o", "?uploadType=media",
"&name=", encoded_object_name);
AddGenerationParam(&upload_url, true, "ifGenerationMatch",
options.generation_conditions.if_equal);
AddUserProjectParam(&upload_url, true, owner->encoded_user_project());
auto maybe_auth_header = owner->GetAuthHeader();
if (!maybe_auth_header.ok()) {
promise.SetResult(maybe_auth_header.status());
return;
}
HttpRequestBuilder request_builder("POST", upload_url);
if (maybe_auth_header.value().has_value()) {
request_builder.AddHeader(*maybe_auth_header.value());
}
auto request =
request_builder.AddHeader("Content-Type: application/octet-stream")
.AddHeader(absl::StrCat("Content-Length: ", value.size()))
.BuildRequest();
start_time_ = absl::Now();
ABSL_LOG_IF(INFO, gcs_http_logging)
<< "WriteTask: " << request << " size=" << value.size();
auto future = owner->transport_->IssueRequest(
request, IssueRequestOptions(value).SetHttpVersion(GetHttpVersion()));
future.ExecuteWhenReady([self = IntrusivePtr<WriteTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
});
}
void OnResponse(const Result<HttpResponse>& response) {
if (!promise.result_needed()) {
return;
}
ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
<< "WriteTask " << *response;
bool is_retryable = IsRetriable(response.status());
absl::Status status = [&]() -> absl::Status {
if (!response.ok()) return response.status();
switch (response.value().status_code) {
case 304:
[[fallthrough]];
case 412:
return absl::OkStatus();
case 404:
if (!options.generation_conditions.MatchesNoValue()) {
return absl::OkStatus();
}
break;
default:
break;
}
return GcsHttpResponseToStatus(response.value(), is_retryable);
}();
if (!status.ok() && is_retryable) {
status =
owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (!status.ok()) {
promise.SetResult(status);
} else {
promise.SetResult(FinishResponse(response.value()));
}
}
Result<TimestampedStorageGeneration> FinishResponse(
const HttpResponse& httpresponse) {
TimestampedStorageGeneration r;
r.time = start_time_;
switch (httpresponse.status_code) {
case 304:
[[fallthrough]];
case 412:
r.generation = StorageGeneration::Unknown();
return r;
case 404:
if (!StorageGeneration::IsUnknown(
options.generation_conditions.if_equal)) {
r.generation = StorageGeneration::Unknown();
return r;
}
}
auto latency = absl::Now() - start_time_;
gcs_metrics.write_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
gcs_metrics.bytes_written.IncrementBy(value.size());
auto payload = httpresponse.payload;
auto parsed_object_metadata = ParseObjectMetadata(payload.Flatten());
TENSORSTORE_RETURN_IF_ERROR(parsed_object_metadata);
r.generation =
StorageGeneration::FromUint64(parsed_object_metadata->generation);
return r;
}
};
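// Implements a conditional GCS object delete. 404 and 412 responses are
// treated as success, with the resulting generation indicating whether the
// precondition held.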
struct DeleteTask : public RateLimiterNode,
public internal::AtomicReferenceCount<DeleteTask> {
IntrusivePtr<GcsKeyValueStore> owner;
std::string resource;
kvstore::WriteOptions options;
Promise<TimestampedStorageGeneration> promise;
int attempt_ = 0;
absl::Time start_time_;
DeleteTask(IntrusivePtr<GcsKeyValueStore> owner, std::string resource,
kvstore::WriteOptions options,
Promise<TimestampedStorageGeneration> promise)
: owner(std::move(owner)),
resource(std::move(resource)),
options(std::move(options)),
promise(std::move(promise)) {}
~DeleteTask() { owner->admission_queue().Finish(this); }
static void Start(void* task) {
auto* self = reinterpret_cast<DeleteTask*>(task);
self->owner->write_rate_limiter().Finish(self);
self->owner->admission_queue().Admit(self, &DeleteTask::Admit);
}
static void Admit(void* task) {
auto* self = reinterpret_cast<DeleteTask*>(task);
self->owner->executor()(
[state = IntrusivePtr<DeleteTask>(self, internal::adopt_object_ref)] {
state->Retry();
});
}
void Retry() {
if (!promise.result_needed()) {
return;
}
std::string delete_url = resource;
bool has_query = AddGenerationParam(&delete_url, false, "ifGenerationMatch",
options.generation_conditions.if_equal);
AddUserProjectParam(&delete_url, has_query, owner->encoded_user_project());
auto maybe_auth_header = owner->GetAuthHeader();
if (!maybe_auth_header.ok()) {
promise.SetResult(maybe_auth_header.status());
return;
}
HttpRequestBuilder request_builder("DELETE", delete_url);
if (maybe_auth_header.value().has_value()) {
request_builder.AddHeader(*maybe_auth_header.value());
}
auto request = request_builder.BuildRequest();
start_time_ = absl::Now();
ABSL_LOG_IF(INFO, gcs_http_logging) << "DeleteTask: " << request;
auto future = owner->transport_->IssueRequest(
request, IssueRequestOptions().SetHttpVersion(GetHttpVersion()));
future.ExecuteWhenReady([self = IntrusivePtr<DeleteTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
});
}
void OnResponse(const Result<HttpResponse>& response) {
if (!promise.result_needed()) {
return;
}
ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
<< "DeleteTask " << *response;
bool is_retryable = IsRetriable(response.status());
absl::Status status = [&]() -> absl::Status {
if (!response.ok()) return response.status();
switch (response.value().status_code) {
case 412:
[[fallthrough]];
case 404:
return absl::OkStatus();
default:
break;
}
return GcsHttpResponseToStatus(response.value(), is_retryable);
}();
if (!status.ok() && is_retryable) {
status =
owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (!status.ok()) {
promise.SetResult(status);
return;
}
TimestampedStorageGeneration r;
r.time = start_time_;
switch (response.value().status_code) {
case 412:
r.generation = StorageGeneration::Unknown();
break;
case 404:
if (!options.generation_conditions.MatchesNoValue()) {
r.generation = StorageGeneration::Unknown();
break;
}
[[fallthrough]];
default:
r.generation = StorageGeneration::NoValue();
break;
}
promise.SetResult(std::move(r));
}
};
Future<TimestampedStorageGeneration> GcsKeyValueStore::Write(
Key key, std::optional<Value> value, WriteOptions options) {
gcs_metrics.write.Increment();
if (!IsValidObjectName(key)) {
return absl::InvalidArgumentError("Invalid GCS object name");
}
if (!IsValidStorageGeneration(options.generation_conditions.if_equal)) {
return absl::InvalidArgumentError("Malformed StorageGeneration");
}
std::string encoded_object_name = internal::PercentEncodeUriComponent(key);
auto op = PromiseFuturePair<TimestampedStorageGeneration>::Make();
if (value) {
auto state = internal::MakeIntrusivePtr<WriteTask>(
IntrusivePtr<GcsKeyValueStore>(this), std::move(encoded_object_name),
*std::move(value), std::move(options), std::move(op.promise));
intrusive_ptr_increment(state.get());
write_rate_limiter().Admit(state.get(), &WriteTask::Start);
} else {
std::string resource = tensorstore::internal::JoinPath(
resource_root_, "/o/", encoded_object_name);
auto state = internal::MakeIntrusivePtr<DeleteTask>(
IntrusivePtr<GcsKeyValueStore>(this), std::move(resource),
std::move(options), std::move(op.promise));
intrusive_ptr_increment(state.get());
write_rate_limiter().Admit(state.get(), &DeleteTask::Start);
}
return std::move(op.future);
}
struct GcsListResponsePayload {
std::string next_page_token;
std::vector<ObjectMetadata> items;
};
constexpr static auto GcsListResponsePayloadBinder = jb::Object(
jb::Member("nextPageToken",
jb::Projection(&GcsListResponsePayload::next_page_token,
jb::DefaultInitializedValue())),
jb::Member("items", jb::Projection(&GcsListResponsePayload::items,
jb::DefaultInitializedValue())),
jb::DiscardExtraMembers);
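// Implements paginated object listing via the GCS JSON API, translating the
// key range into startOffset/endOffset parameters and following
// nextPageToken until the listing is exhausted or cancelled.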
struct ListTask : public RateLimiterNode,
public internal::AtomicReferenceCount<ListTask> {
internal::IntrusivePtr<GcsKeyValueStore> owner_;
ListOptions options_;
ListReceiver receiver_;
std::string resource_;
std::string base_list_url_;
std::string next_page_token_;
int attempt_ = 0;
bool has_query_parameters_;
std::atomic<bool> cancelled_{false};
ListTask(internal::IntrusivePtr<GcsKeyValueStore>&& owner,
ListOptions&& options, ListReceiver&& receiver,
std::string&& resource)
: owner_(std::move(owner)),
options_(std::move(options)),
receiver_(std::move(receiver)),
resource_(std::move(resource)) {
base_list_url_ = resource_;
has_query_parameters_ = AddUserProjectParam(&base_list_url_, false,
owner_->encoded_user_project());
if (auto& inclusive_min = options_.range.inclusive_min;
!inclusive_min.empty()) {
absl::StrAppend(
&base_list_url_, (has_query_parameters_ ? "&" : "?"),
"startOffset=", internal::PercentEncodeUriComponent(inclusive_min));
has_query_parameters_ = true;
}
if (auto& exclusive_max = options_.range.exclusive_max;
!exclusive_max.empty()) {
absl::StrAppend(
&base_list_url_, (has_query_parameters_ ? "&" : "?"),
"endOffset=", internal::PercentEncodeUriComponent(exclusive_max));
has_query_parameters_ = true;
}
}
~ListTask() { owner_->admission_queue().Finish(this); }
inline bool is_cancelled() {
return cancelled_.load(std::memory_order_relaxed);
}
static void Start(void* task) {
auto* self = reinterpret_cast<ListTask*>(task);
self->owner_->read_rate_limiter().Finish(self);
self->owner_->admission_queue().Admit(self, &ListTask::Admit);
}
static void Admit(void* task) {
auto* self = reinterpret_cast<ListTask*>(task);
execution::set_starting(self->receiver_, [self] {
self->cancelled_.store(true, std::memory_order_relaxed);
});
self->owner_->executor()(
[state = IntrusivePtr<ListTask>(self, internal::adopt_object_ref)] {
state->IssueRequest();
});
}
void Retry() { IssueRequest(); }
void IssueRequest() {
if (is_cancelled()) {
execution::set_done(receiver_);
execution::set_stopping(receiver_);
return;
}
std::string list_url = base_list_url_;
if (!next_page_token_.empty()) {
absl::StrAppend(&list_url, (has_query_parameters_ ? "&" : "?"),
"pageToken=", next_page_token_);
}
auto auth_header = owner_->GetAuthHeader();
if (!auth_header.ok()) {
execution::set_error(receiver_, std::move(auth_header).status());
execution::set_stopping(receiver_);
return;
}
HttpRequestBuilder request_builder("GET", list_url);
if (auth_header->has_value()) {
request_builder.AddHeader(auth_header->value());
}
auto request = request_builder.BuildRequest();
ABSL_LOG_IF(INFO, gcs_http_logging) << "List: " << request;
auto future = owner_->transport_->IssueRequest(
request, IssueRequestOptions().SetHttpVersion(GetHttpVersion()));
future.ExecuteWhenReady(WithExecutor(
owner_->executor(), [self = IntrusivePtr<ListTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
}));
}
void OnResponse(const Result<HttpResponse>& response) {
auto status = OnResponseImpl(response);
if (absl::IsCancelled(status)) {
execution::set_done(receiver_);
execution::set_stopping(receiver_);
return;
}
if (!status.ok()) {
execution::set_error(receiver_, std::move(status));
execution::set_stopping(receiver_);
return;
}
}
absl::Status OnResponseImpl(const Result<HttpResponse>& response) {
if (is_cancelled()) {
return absl::CancelledError();
}
ABSL_LOG_IF(INFO, gcs_http_logging.Level(1) && response.ok())
<< "List " << *response;
bool is_retryable = IsRetriable(response.status());
absl::Status status =
response.ok() ? GcsHttpResponseToStatus(response.value(), is_retryable)
: response.status();
if (!status.ok() && is_retryable) {
return owner_->BackoffForAttemptAsync(std::move(status), attempt_++,
this);
}
auto payload = response->payload;
auto j = internal::ParseJson(payload.Flatten());
if (j.is_discarded()) {
return absl::InternalError(absl::StrCat(
"Failed to parse response metadata: ", payload.Flatten()));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto parsed_payload,
jb::FromJson<GcsListResponsePayload>(j, GcsListResponsePayloadBinder));
for (auto& metadata : parsed_payload.items) {
if (is_cancelled()) {
return absl::CancelledError();
}
std::string_view name = metadata.name;
if (options_.strip_prefix_length) {
name = name.substr(options_.strip_prefix_length);
}
execution::set_value(receiver_,
ListEntry{
std::string(name),
ListEntry::checked_size(metadata.size),
});
}
attempt_ = 0;
next_page_token_ = std::move(parsed_payload.next_page_token);
if (!next_page_token_.empty()) {
IssueRequest();
} else {
execution::set_done(receiver_);
execution::set_stopping(receiver_);
}
return absl::OkStatus();
}
};
void GcsKeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
gcs_metrics.list.Increment();
if (options.range.empty()) {
execution::set_starting(receiver, [] {});
execution::set_done(receiver);
execution::set_stopping(receiver);
return;
}
auto state = internal::MakeIntrusivePtr<ListTask>(
IntrusivePtr<GcsKeyValueStore>(this), std::move(options),
std::move(receiver),
tensorstore::internal::JoinPath(resource_root_, "/o"));
intrusive_ptr_increment(state.get());
read_rate_limiter().Admit(state.get(), &ListTask::Start);
}
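// Receiver that deletes every listed key; used to implement DeleteRange on
// top of ListImpl.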
struct DeleteRangeListReceiver {
IntrusivePtr<GcsKeyValueStore> owner_;
Promise<void> promise_;
FutureCallbackRegistration cancel_registration_;
void set_starting(AnyCancelReceiver cancel) {
cancel_registration_ = promise_.ExecuteWhenNotNeeded(std::move(cancel));
}
void set_value(ListEntry entry) {
assert(!entry.key.empty());
if (!entry.key.empty()) {
LinkError(promise_, owner_->Delete(std::move(entry.key)));
}
}
void set_error(absl::Status error) {
SetDeferredResult(promise_, std::move(error));
promise_ = Promise<void>();
}
void set_done() { promise_ = Promise<void>(); }
void set_stopping() { cancel_registration_.Unregister(); }
};
Future<const void> GcsKeyValueStore::DeleteRange(KeyRange range) {
gcs_metrics.delete_range.Increment();
if (range.empty()) return absl::OkStatus();
auto op = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
ListOptions list_options;
list_options.range = std::move(range);
ListImpl(list_options, DeleteRangeListReceiver{
internal::IntrusivePtr<GcsKeyValueStore>(this),
std::move(op.promise)});
return std::move(op.future);
}
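// Parses a "gs://bucket/path" URL into a driver spec with default context
// resources.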
Result<kvstore::Spec> ParseGcsUrl(std::string_view url) {
auto parsed = internal::ParseGenericUri(url);
assert(parsed.scheme == kUriScheme);
if (!parsed.query.empty()) {
return absl::InvalidArgumentError("Query string not supported");
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError("Fragment identifier not supported");
}
if (!IsValidBucketName(parsed.authority)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid GCS bucket name: ", QuoteString(parsed.authority)));
}
auto decoded_path = parsed.path.empty()
? std::string()
: internal::PercentDecode(parsed.path.substr(1));
auto driver_spec = internal::MakeIntrusivePtr<GcsKeyValueStoreSpec>();
driver_spec->data_.bucket = std::string(parsed.authority);
driver_spec->data_.request_concurrency =
Context::Resource<GcsConcurrencyResource>::DefaultSpec();
driver_spec->data_.user_project =
Context::Resource<GcsUserProjectResource>::DefaultSpec();
driver_spec->data_.retries =
Context::Resource<GcsRequestRetries>::DefaultSpec();
driver_spec->data_.data_copy_concurrency =
Context::Resource<DataCopyConcurrencyResource>::DefaultSpec();
return {std::in_place, std::move(driver_spec), std::move(decoded_path)};
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::GcsKeyValueStore)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::GcsKeyValueStoreSpec>
registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
url_scheme_registration{kUriScheme, tensorstore::ParseGcsUrl};
} | #include <stddef.h>
#include <algorithm>
#include <atomic>
#include <memory>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/oauth2/google_auth_provider.h"
#include "tensorstore/internal/oauth2/google_auth_test_utils.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/gcs_http/gcs_mock.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::CompletionNotifyingReceiver;
using ::tensorstore::Context;
using ::tensorstore::Future;
using ::tensorstore::GCSMockStorageBucket;
using ::tensorstore::KeyRange;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::MatchesListEntry;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal_http::ApplyResponseToHandler;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpResponseHandler;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
using ::tensorstore::internal_oauth2::GoogleAuthTestScope;
static constexpr char kDriver[] = "gcs";
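// Serves fake GCE metadata-server responses (OAuth token and default service
// account) so that auth resolution succeeds in tests.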
class MetadataMockHelper {
public:
tensorstore::Result<HttpResponse> GetResponse(const HttpRequest& request) {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (!absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal/")) {
return absl::UnimplementedError("Mock cannot satisfy the request.");
}
constexpr char kOAuthPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/[email protected]/token";
if (absl::StartsWith(parsed.authority_and_path, kOAuthPath)) {
return HttpResponse{
200,
absl::Cord(
R"({ "token_type" : "refresh", "access_token": "abc", "expires_in": 3600 })")};
}
constexpr char kServiceAccountPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/default/";
if (absl::StartsWith(parsed.authority_and_path, kServiceAccountPath)) {
return HttpResponse{
200, absl::Cord(
R"({ "email": "[email protected]", "scopes": [ "test" ] })")};
}
return HttpResponse{200, absl::Cord()};
}
GoogleAuthTestScope google_auth_test_scope;
};
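// Transport that first consults the metadata mock and then each registered
// mock GCS bucket.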
class MyMockTransport : public HttpTransport {
public:
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) override {
ApplyResponseToHandler(
[&]() -> Result<HttpResponse> {
auto result = metadata_mock_.GetResponse(request);
if (result.ok()) return result;
for (auto* bucket : buckets_) {
result = bucket->IssueRequest(request, options.payload);
if (result.ok()) break;
}
return result;
}(),
response_handler);
}
MetadataMockHelper metadata_mock_;
std::vector<GCSMockStorageBucket*> buckets_;
};
struct DefaultHttpTransportSetter {
DefaultHttpTransportSetter(std::shared_ptr<HttpTransport> transport) {
SetDefaultHttpTransport(transport);
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
}
~DefaultHttpTransportSetter() {
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
SetDefaultHttpTransport(nullptr);
}
};
Context DefaultTestContext() {
return Context{Context::Spec::FromJson({{"gcs_request_retries",
{{"max_retries", 4},
{"initial_delay", "1ms"},
{"max_delay", "5ms"}}}})
.value()};
}
TEST(GcsKeyValueStoreTest, BadBucketNames) {
auto context = DefaultTestContext();
for (auto bucket :
{"a", "_abc", "abc_", "ABC", "a..b", "a.-.b",
"a."
"0123456789123456789012345678912345678901234567891234567890"
"1234567891234567890123456789123456789012345678912345678901"
"23456789123456789.b"}) {
EXPECT_FALSE(
kvstore::Open({{"driver", kDriver}, {"bucket", bucket}}, context)
.result())
<< "bucket: " << bucket;
}
for (auto bucket : {"abc", "abc.1-2_3.abc"}) {
EXPECT_TRUE(
kvstore::Open({{"driver", kDriver}, {"bucket", bucket}}, context)
.result())
<< "bucket: " << bucket;
}
}
TEST(GcsKeyValueStoreTest, BadObjectNames) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
EXPECT_THAT(kvstore::Read(store, ".").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Read(store, "..").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Read(store, ".well-known/acme-challenge").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Read(store, "foo\nbar").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Read(store, "foo\rbar").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
{
kvstore::ReadOptions options;
options.generation_conditions.if_not_equal =
StorageGeneration::FromString("abc123");
EXPECT_THAT(kvstore::Read(store, "abc", options).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
TEST(GcsKeyValueStoreTest, Basic) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec, store.spec());
EXPECT_THAT(spec.ToJson(tensorstore::IncludeDefaults{false}),
::testing::Optional(
MatchesJson({{"driver", kDriver}, {"bucket", "my-bucket"}})));
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
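// Exercises the retry loop: triggering max_retries + 1 consecutive transient
// errors must exhaust the retry budget and surface kAborted, while
// max_retries - 2 errors must still leave enough attempts to succeed.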
TEST(GcsKeyValueStoreTest, Retry) {
for (int max_retries : {2, 3, 4}) {
for (bool fail : {false, true}) {
ABSL_LOG(INFO) << max_retries << (fail ? " fail" : " success");
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", kDriver},
{"bucket", "my-bucket"},
{"context",
{
{"gcs_request_retries",
{{"max_retries", max_retries},
{"initial_delay", "1ms"},
{"max_delay", "10ms"}}},
}}},
context)
.result());
if (fail) {
bucket.TriggerErrors(max_retries + 1);
EXPECT_THAT(kvstore::Read(store, "x").result(),
MatchesStatus(absl::StatusCode::kAborted));
} else {
bucket.TriggerErrors(max_retries - 2);
TENSORSTORE_EXPECT_OK(kvstore::Read(store, "x").result());
}
}
}
}
TEST(GcsKeyValueStoreTest, List) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
{
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {}),
CompletionNotifyingReceiver{¬ification,
tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
"set_stopping"));
}
EXPECT_THAT(ListFuture(store, {}).result(),
::testing::Optional(::testing::ElementsAre()));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/b", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/d", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/x", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/y", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/e", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/f", absl::Cord("xyz")));
{
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {}),
CompletionNotifyingReceiver{¬ification,
tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(
log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: a/d", "set_value: a/c/z/f",
"set_value: a/c/y", "set_value: a/c/z/e", "set_value: a/c/x",
"set_value: a/b", "set_done", "set_stopping"));
}
EXPECT_THAT(ListFuture(store, {}).result(),
::testing::Optional(::testing::UnorderedElementsAre(
MatchesListEntry("a/d"), MatchesListEntry("a/c/z/f"),
MatchesListEntry("a/c/y"), MatchesListEntry("a/c/z/e"),
MatchesListEntry("a/c/x"), MatchesListEntry("a/b"))));
{
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {KeyRange::Prefix("a/c/")}),
CompletionNotifyingReceiver{¬ification,
tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: a/c/z/f",
"set_value: a/c/y", "set_value: a/c/z/e",
"set_value: a/c/x", "set_done", "set_stopping"));
}
{
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {}),
CompletionNotifyingReceiver{
¬ification, tensorstore::CancelOnStartingReceiver{{&log}}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
"set_stopping"));
}
{
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {}),
CompletionNotifyingReceiver{
¬ification, tensorstore::CancelAfterNReceiver<2>{{&log}}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::Contains("set_starting"));
EXPECT_THAT(log, ::testing::Contains("set_done"));
EXPECT_THAT(log, ::testing::Contains("set_stopping"));
EXPECT_LE(4, log.size());
EXPECT_THAT(
log, ::testing::Contains(::testing::AnyOf(
"set_value: a/d", "set_value: a/c/z/f", "set_value: a/c/y",
"set_value: a/c/z/e", "set_value: a/c/x", "set_value: a/b")));
}
EXPECT_THAT(ListFuture(store, {KeyRange::Prefix("a/c/")}).result(),
::testing::Optional(::testing::UnorderedElementsAre(
MatchesListEntry("a/c/z/f"), MatchesListEntry("a/c/y"),
MatchesListEntry("a/c/z/e"), MatchesListEntry("a/c/x"))));
}
TEST(GcsKeyValueStoreTest, SpecRoundtrip) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.full_spec = {{"driver", kDriver}, {"bucket", "my-bucket"}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(GcsKeyValueStoreTest, InvalidSpec) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
EXPECT_THAT(
kvstore::Open(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"extra", "key"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Open({{"driver", kDriver}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open({{"driver", kDriver}, {"bucket", 5}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"path", "a\tb"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
TEST(GcsKeyValueStoreTest, RequestorPays) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket1("my-bucket1");
GCSMockStorageBucket bucket2("my-bucket2", "myproject");
mock_transport->buckets_.push_back(&bucket1);
mock_transport->buckets_.push_back(&bucket2);
const auto TestWrite = [&](Context context, auto bucket2_status_matcher) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store1, kvstore::Open({{"driver", kDriver},
{"bucket", "my-bucket1"},
{"context",
{
{"gcs_request_retries",
{{"max_retries", 3},
{"initial_delay", "1ms"},
{"max_delay", "10ms"}}},
}}},
context)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open({{"driver", kDriver},
{"bucket", "my-bucket2"},
{"context",
{
{"gcs_request_retries",
{{"max_retries", 3},
{"initial_delay", "1ms"},
{"max_delay", "10ms"}}},
}}},
context)
.result());
TENSORSTORE_EXPECT_OK(kvstore::Write(store1, "abc", absl::Cord("xyz")));
EXPECT_THAT(kvstore::Write(store2, "abc", absl::Cord("xyz")).status(),
bucket2_status_matcher);
};
TestWrite(Context::Default(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
TestWrite(Context(Context::Spec::FromJson(
{{"gcs_user_project", {{"project_id", "badproject"}}}})
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument));
TestWrite(Context(Context::Spec::FromJson(
{{"gcs_user_project", {{"project_id", "myproject"}}}})
.value()),
absl::OkStatus());
}
TEST(GcsKeyValueStoreTest, DeletePrefix) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST(GcsKeyValueStoreTest, DeleteRange) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
bucket.SetErrorRate(0.02);
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST(GcsKeyValueStoreTest, DeleteRangeToEnd) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST(GcsKeyValueStoreTest, DeleteRangeFromBeginning) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
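// Stalls every DELETE request until the test signals
// `cancellation_notification_`. This lets the test drop the DeleteRange
// future (cancelling the pending work) before any delete can complete, and
// then count how many DELETE requests actually reached the transport.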
class MyDeleteRangeCancellationMockTransport : public MyMockTransport {
public:
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) final {
if (request.method == "DELETE") {
cancellation_notification_.WaitForNotification();
++total_delete_requests_;
}
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
}
std::atomic<size_t> total_delete_requests_{0};
absl::Notification cancellation_notification_;
};
TEST(GcsKeyValueStoreTest, DeleteRangeCancellation) {
auto mock_transport =
std::make_shared<MyDeleteRangeCancellationMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open(
{
{"driver", kDriver},
{"bucket", "my-bucket"},
{"context", {{"gcs_request_concurrency", {{"limit", 1}}}}},
},
context)
.result());
for (std::string key : {"a/b", "a/c/a", "a/c/b", "a/c/d", "a/d"}) {
TENSORSTORE_ASSERT_OK(kvstore::Write(store, key, absl::Cord()));
}
{
[[maybe_unused]] auto future =
kvstore::DeleteRange(store, tensorstore::KeyRange{"a/ba", "a/ca"});
}
mock_transport->cancellation_notification_.Notify();
absl::SleepFor(absl::Milliseconds(100));
EXPECT_GE(1, mock_transport->total_delete_requests_.load());
EXPECT_THAT(ListFuture(store).result(),
::testing::Optional(::testing::SizeIs(::testing::Ge(4))));
}
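// Measures the peak number of concurrently outstanding non-metadata
// requests: each request bumps a counter, is delayed ~5ms via ScheduleAt,
// then decrements the counter and forwards to the base transport. reset()
// returns and clears the observed maximum.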
class MyConcurrentMockTransport : public MyMockTransport {
public:
size_t reset() {
absl::MutexLock lock(&concurrent_request_mutex_);
cur_concurrent_requests_ = 0;
return std::exchange(max_concurrent_requests_, 0);
}
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) final {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal/")) {
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
return;
}
{
absl::MutexLock lock(&concurrent_request_mutex_);
++cur_concurrent_requests_;
max_concurrent_requests_ =
std::max(max_concurrent_requests_, cur_concurrent_requests_);
}
auto op = tensorstore::PromiseFuturePair<HttpResponse>::Make();
ScheduleAt(absl::Now() + absl::Milliseconds(5),
[this, r = request, o = std::move(options), response_handler] {
absl::MutexLock lock(&concurrent_request_mutex_);
--cur_concurrent_requests_;
MyMockTransport::IssueRequestWithHandler(r, std::move(o),
response_handler);
});
}
size_t cur_concurrent_requests_ = 0;
size_t max_concurrent_requests_ = 0;
absl::Mutex concurrent_request_mutex_;
};
TEST(GcsKeyValueStoreTest, Concurrency) {
auto mock_transport = std::make_shared<MyConcurrentMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
const auto TestConcurrency = [&](size_t limit) {
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open(
{
{"driver", kDriver},
{"bucket", "my-bucket"},
{"context",
{{"gcs_request_concurrency", {{"limit", limit}}}}}
},
context)
.result());
std::vector<tensorstore::Future<kvstore::ReadResult>> futures;
for (size_t i = 0; i < 10 * limit; ++i) {
futures.push_back(kvstore::Read(store, "abc"));
}
for (const auto& future : futures) {
future.Wait();
}
};
TestConcurrency(1);
EXPECT_EQ(1, mock_transport->reset());
TestConcurrency(2);
EXPECT_EQ(2, mock_transport->reset());
TestConcurrency(3);
EXPECT_EQ(3, mock_transport->reset());
}
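// Records the wall-clock times of the first and last non-metadata request
// plus a request count; the RateLimited test uses (max_time_ - min_time_)
// to estimate how widely the rate limiter spread a batch of reads.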
class MyRateLimitedMockTransport : public MyMockTransport {
public:
std::tuple<absl::Time, absl::Time, size_t> reset() {
absl::MutexLock l(&request_timing_mutex_);
return {min_time_, max_time_, std::exchange(count_, 0)};
}
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) final {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal/")) {
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
return;
}
{
absl::MutexLock l(&request_timing_mutex_);
max_time_ = absl::Now();
if (count_++ == 0) {
min_time_ = max_time_;
}
}
MyMockTransport::IssueRequestWithHandler(request, std::move(options),
response_handler);
}
absl::Time min_time_;
absl::Time max_time_;
size_t count_;
absl::Mutex request_timing_mutex_;
};
TEST(GcsKeyValueStoreTest, RateLimited) {
auto mock_transport = std::make_shared<MyRateLimitedMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
const auto TestRateLimiting = [&](size_t limit) {
tensorstore::Context context{
tensorstore::Context::Spec::FromJson(
{
{"gcs_request_concurrency", {{"limit", 128}}},
{"data_copy_concurrency", {{"limit", 128}}},
{"experimental_gcs_rate_limiter",
{{"read_rate", limit},
{"write_rate", limit},
{"doubling_time", "20m"}}},
})
.value()};
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto store,
kvstore::Open(
{
{"driver", kDriver},
{"bucket", "my-bucket"},
},
context)
.result());
kvstore::Read(store, "xyz").Wait();
mock_transport->reset();
std::vector<tensorstore::Future<kvstore::ReadResult>> futures;
for (size_t i = 0; i < 100; ++i) {
futures.push_back(kvstore::Read(store, "abc"));
}
for (const auto& future : futures) {
future.Wait();
}
auto t = mock_transport->reset();
return std::get<1>(t) - std::get<0>(t);
};
[[maybe_unused]] auto a = TestRateLimiting(10);
[[maybe_unused]] auto b = TestRateLimiting(1000);
#if 0
EXPECT_THAT(b, testing::Lt(a));
#endif
}
TEST(GcsKeyValueStoreTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"path", "abc"}},
"gs:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", kDriver}, {"bucket", "my-bucket"}, {"path", "abc def"}},
"gs:
}
TEST(GcsKeyValueStoreTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Invalid GCS bucket name: \"bucket:xyz\""));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("gs:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid GCS path.*"));
}
TEST(GcsKeyValueStoreTest, BatchRead) {
auto mock_transport = std::make_shared<MyMockTransport>();
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
GCSMockStorageBucket bucket("my-bucket");
mock_transport->buckets_.push_back(&bucket);
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", kDriver}, {"bucket", "my-bucket"}}, context)
.result());
tensorstore::internal::BatchReadGenericCoalescingTestOptions options;
options.coalescing_options = tensorstore::internal_kvstore_batch::
kDefaultRemoteStorageCoalescingOptions;
options.metric_prefix = "/tensorstore/kvstore/gcs/";
tensorstore::internal::TestBatchReadGenericCoalescing(store, options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/gcs_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/gcs_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
65c4ba39-beed-4193-b33f-f21f494b92c6 | cpp | tensorflow/tensorflow | get_dimension_size | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/get_dimension_size.cc | third_party/xla/xla/tests/get_dimension_size_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/get_dimension_size.h"
#include <cstdint>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
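// Lowers mhlo.get_dimension_size to TFLite ops: materialize the full shape
// with tfl.shape, slice out the requested dimension, cast to the result
// element type if it differs from i64, and squeeze the one-element vector
// down to a scalar.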
class LegalizeDimensionSizeOp
: public OpConversionPattern<mhlo::GetDimensionSizeOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::GetDimensionSizeOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
ImplicitLocOpBuilder builder(op.getLoc(), rewriter);
auto operand_type = llvm::cast<ShapedType>(op.getOperand().getType());
auto shaped_op_type =
RankedTensorType::get({operand_type.getRank()}, rewriter.getI64Type());
Value shape_op = rewriter.create<TFL::ShapeOp>(op.getLoc(), shaped_op_type,
op.getOperand());
Value size = BuildIntArrayConstOp<arith::ConstantOp>(builder, rewriter, {1},
rewriter.getI64Type());
auto begin = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter,
llvm::SmallVector<int64_t>({static_cast<int64_t>(op.getDimension())}),
rewriter.getI64Type());
auto slice_type = RankedTensorType::get({1}, rewriter.getI64Type());
Value slice = rewriter.create<TFL::SliceOp>(op.getLoc(), slice_type,
shape_op, begin, size);
auto op_el_type = llvm::cast<ShapedType>(op.getType()).getElementType();
if (op_el_type != slice_type.getElementType()) {
slice = rewriter.create<TFL::CastOp>(op->getLoc(),
slice_type.clone(op_el_type), slice);
}
rewriter.replaceOpWithNewOp<TFL::SqueezeOp>(op, op.getType(), slice,
rewriter.getI64ArrayAttr({0}));
return success();
}
};
}
void PopulateGetDimensionSizePatterns(MLIRContext* ctx,
RewritePatternSet& patterns,
ConversionTarget& target) {
target.addIllegalOp<mhlo::GetDimensionSizeOp>();
  patterns.add<LegalizeDimensionSizeOp>(ctx);
}
} | #include <utility>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
void DisableAllHloPasses(HloModule& module) {
auto debug_options = module.config().debug_options();
debug_options.set_xla_disable_all_hlo_passes(true);
module.mutable_config().set_debug_options(debug_options);
}
class GetDimensionSizeTest : public HloTestBase {};
TEST_F(GetDimensionSizeTest, CorrectComputation) {
const char* const kModuleStr = R"(
HloModule a_inference_call_110__.55
ENTRY %a_inference_call_110__.55 (arg0.1: f32[1,8], arg1.2: f32[8], arg2.3: f32[8]) -> s32[] {
%constant.37 = f32[] constant(1e-12)
%broadcast.38 = f32[1,1]{1,0} broadcast(f32[] %constant.37), dimensions={}
%arg0.1 = f32[1,8]{1,0} parameter(0), parameter_replication={false}
%reshape.4 = f32[1,8]{1,0} reshape(f32[1,8]{1,0} %arg0.1)
%convert.5 = f32[1,8]{1,0} convert(f32[1,8]{1,0} %reshape.4)
%constant.6 = f32[] constant(0)
%convert.7 = f32[] convert(f32[] %constant.6)
ROOT %get-dimension-size.13 = s32[] get-dimension-size(f32[1,8]{1,0} %convert.5), dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.01, 0.01}));
}
TEST_F(GetDimensionSizeTest,
DISABLED_ON_INTERPRETER(DISABLED_ON_GPU(
DISABLED_ON_TPU(ReturnsErrorWhenHloPassesDisabled)))) {
const char* const kModuleStr = R"(
HloModule m
ENTRY %test {
%arg0 = f32[1,8] parameter(0)
ROOT %get-dimension-size.0 = s32[] get-dimension-size(%arg0),
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
DisableAllHloPasses(*module);
Literal arg0 =
LiteralUtil::CreateR1<float>({0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0});
auto status_or_result = Execute(std::move(module), {&arg0});
EXPECT_EQ(status_or_result.status().code(), absl::StatusCode::kUnimplemented);
EXPECT_THAT(
status_or_result.status().message(),
::testing::HasSubstr("GetDimensionSize should be rewritten for CPU"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/get_dimension_size.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/get_dimension_size_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cd5d9ea9-18fe-4df4-93c5-bc52f4a12e63 | cpp | tensorflow/tensorflow | plugin_program_serdes | third_party/xla/xla/python/ifrt/plugin_program_serdes.cc | third_party/xla/xla/python/ifrt/plugin_program_serdes_test.cc | #include <memory>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/plugin_program.h"
#include "xla/python/ifrt/serdes.h"
namespace xla {
namespace ifrt {
namespace {
constexpr absl::string_view kSerializationPrefix =
"__serialized_plugin_program ";
class PluginProgramSerDes
: public llvm::RTTIExtends<PluginProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::PluginProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
return absl::StrCat(kSerializationPrefix,
llvm::cast<PluginProgram>(serializable).data);
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
if (!absl::StartsWith(serialized, kSerializationPrefix)) {
return absl::InvalidArgumentError(
absl::StrCat("Bad serialized ", type_name()));
}
absl::string_view data(serialized);
data.remove_prefix(kSerializationPrefix.size());
auto result = std::make_unique<PluginProgram>();
result->data = data;
return result;
}
static char ID;
};
[[maybe_unused]] char PluginProgramSerDes::ID = 0;
bool register_plugin_program_serdes = ([]() {
RegisterSerDes<PluginProgram>(
std::make_unique<PluginProgramSerDes>());
}(), true);
class PluginCompileOptionsSerDes
: public llvm::RTTIExtends<PluginCompileOptionsSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::PluginCompileOptions";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
return std::make_unique<PluginCompileOptions>();
}
static char ID;
};
[[maybe_unused]] char PluginCompileOptionsSerDes::ID = 0;
bool register_plugin_compile_options_serdes = ([]() {
RegisterSerDes<PluginCompileOptions>(
std::make_unique<PluginCompileOptionsSerDes>());
}(), true);
}
}
} | #include <memory>
#include <gtest/gtest.h>
#include "xla/python/ifrt/plugin_program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
namespace ifrt {
namespace {
TEST(PluginProgramSerDesTest, RoundTrip) {
PluginProgram orig;
orig.data = "foo";
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PluginProgram> deserialized_program,
Deserialize<PluginProgram>(serialized, nullptr));
EXPECT_EQ(deserialized_program->data, "foo");
}
TEST(PluginCompileOptionsSerDesTest, RoundTrip) {
PluginCompileOptions orig;
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_EXPECT_OK(
Deserialize<PluginCompileOptions>(serialized, nullptr)
.status());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/plugin_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/plugin_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0083186-28e1-4ea5-95ad-1a8f341b1df3 | cpp | tensorflow/tensorflow | hlo_evaluator | third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc | third_party/xla/xla/hlo/evaluator/hlo_evaluator_test.cc | #include "xla/hlo/evaluator/hlo_evaluator.h"
#include <algorithm>
#include <atomic>
#include <cmath>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <random>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/memory/memory.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/array2d.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator_typed_visitor.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/compilation_environments.h"
#include "xla/service/cpu/runtime_single_threaded_matmul.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using primitive_util::NativeTypeOf;
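// Element-wise comparison of two literals of the same element type. For
// floating-point operands under a total-order comparison, both sides are
// first mapped through ToSignMagnitude so the result follows IEEE totalOrder
// semantics (e.g. -NaN < -Inf and +Inf < +NaN). The ordered directions are
// compiled out for complex types, which only support Eq/Ne.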
template <typename OperandT>
absl::StatusOr<Literal> Compare(const Shape& shape, Comparison comparison,
LiteralSlice lhs_literal,
LiteralSlice rhs_literal) {
auto populate = [&](auto compare_op) -> absl::StatusOr<Literal> {
Literal result(shape);
TF_RETURN_IF_ERROR(result.PopulateParallel<bool>(
[&](absl::Span<const int64_t> multi_index, int ) {
auto lhs = lhs_literal.Get<OperandT>(multi_index);
auto rhs = rhs_literal.Get<OperandT>(multi_index);
if constexpr (is_specialized_floating_point_v<OperandT>) {
if (comparison.IsTotalOrder()) {
return compare_op(ToSignMagnitude(lhs), ToSignMagnitude(rhs));
}
}
return compare_op(lhs, rhs);
}));
return std::move(result);
};
switch (comparison.GetDirection()) {
case ComparisonDirection::kEq:
return populate([](auto lhs, auto rhs) { return lhs == rhs; });
case ComparisonDirection::kNe:
return populate([](auto lhs, auto rhs) { return lhs != rhs; });
case ComparisonDirection::kGe:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs >= rhs; });
}
break;
case ComparisonDirection::kGt:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs > rhs; });
}
break;
case ComparisonDirection::kLe:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs <= rhs; });
}
break;
case ComparisonDirection::kLt:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs < rhs; });
}
break;
}
LOG(FATAL) << "unhandled direction for conversion to Comparison: "
<< comparison.ToString();
}
std::optional<bool> GetInstructionStaticValueAsBool(
const HloInstruction* instruction) {
HloEvaluator evaluator;
absl::StatusOr<Literal> static_value =
      evaluator.Evaluate(instruction, /*precomputed_analyses=*/{},
                         /*recursively_evaluate_nonconstant_operands=*/true);
if (static_value.ok()) {
return static_value->GetFirstElement<bool>();
}
return std::nullopt;
}
template <PrimitiveType kType>
struct PopulateParallelImpl {
using NativeT = NativeTypeOf<kType>;
static absl::Status Run(
Literal& literal,
absl::FunctionRef<Literal(absl::Span<const int64_t>, int)>
literal_generator) {
return literal.PopulateParallel<NativeT>(
[&literal_generator](absl::Span<const int64_t> output_index,
int thread_id) {
return literal_generator(output_index, thread_id)
.template Get<NativeT>({});
});
}
};
template <PrimitiveType kType>
struct PopulateImpl {
using NativeT = NativeTypeOf<kType>;
static absl::Status Run(
Literal& literal,
absl::FunctionRef<Literal(absl::Span<const int64_t>)> literal_generator) {
return literal.Populate<NativeT>(
[&literal_generator](absl::Span<const int64_t> output_index) {
return literal_generator(output_index).template Get<NativeT>({});
});
}
};
template <template <PrimitiveType> typename Trait, typename F>
absl::Status Apply(Literal& literal, F&& literal_generator) {
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&, literal_generator = std::forward<F>(literal_generator)](
auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
return Trait<primitive_type_constant>::Run(
literal, std::move(literal_generator));
}
LOG(FATAL) << "Unhandled primitive type "
<< literal.shape().element_type();
},
literal.shape().element_type());
}
absl::Status MakeEvalErrorDueToParamOrInfeed(
const HloInstruction& eval_instruction) {
absl::Status error = absl::FailedPreconditionError(absl::StrCat(
"Failed to evaluate instruction (", eval_instruction.name(),
") since it depends on infeed or parameters to its parent computation (",
eval_instruction.parent()->name(), ")."));
std::string error_payload;
error_payload.resize(sizeof(internal::EvalErrorDetail));
absl::little_endian::Store32(
const_cast<char*>(error_payload.data()),
static_cast<uint32_t>(
internal::EvalErrorDetail::kDynamicValueDependence));
error.SetPayload(internal::kEvalErrorDetailUrl, absl::Cord(error_payload));
return error;
}
struct DynamicOrStaticInteger {
std::optional<int64_t> static_value;
bool is_dynamic() const { return !static_value.has_value(); }
std::string ToString() const {
return is_dynamic() ? std::string("DYNAMIC") : absl::StrCat(*static_value);
}
};
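// Evaluates `instruction` to a scalar integer if it is a compile-time
// constant (PRED values are widened to 0/1). Returns a "dynamic" marker when
// evaluation fails only because the value depends on parameters or infeed,
// and std::nullopt for any other failure.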
std::optional<DynamicOrStaticInteger> GetInstructionValueAsInteger(
const HloInstruction* instruction,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
HloEvaluator evaluator;
absl::StatusOr<Literal> static_value =
evaluator.Evaluate(instruction, precomputed_analyses,
                         /*recursively_evaluate_nonconstant_operands=*/true);
if (static_value.ok()) {
if (instruction->shape().element_type() == PrimitiveType::PRED) {
return DynamicOrStaticInteger{
static_cast<int64_t>(static_value->GetFirstElement<bool>())};
} else {
return DynamicOrStaticInteger{static_value->GetFirstInteger()};
}
}
std::optional<internal::EvalErrorDetail> eval_error_detail =
internal::ParseEvalErrorDetail(static_value.status());
if (eval_error_detail.has_value() &&
*eval_error_detail ==
internal::EvalErrorDetail::kDynamicValueDependence) {
return DynamicOrStaticInteger{std::nullopt};
}
return std::nullopt;
}
struct ParamIndexAndValue {
std::optional<int64_t> param_index;
std::optional<DynamicOrStaticInteger> value;
bool IsValid() const { return param_index.has_value() || value.has_value(); }
std::string ToString() const {
return absl::StrCat(
"param_index:",
!param_index.has_value() ? std::string("UNKNOWN")
: absl::StrCat(*param_index),
",", "value:",
!value.has_value() ? std::string("UNKONWN") : value->ToString());
}
};
std::optional<ParamIndexAndValue> TryParsingInstructionAsParameterAndInteger(
const HloInstruction* instruction,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (instruction->opcode() == HloOpcode::kCopy) {
return TryParsingInstructionAsParameterAndInteger(instruction->operand(0),
precomputed_analyses);
}
if (instruction->opcode() == HloOpcode::kCopyDone) {
return TryParsingInstructionAsParameterAndInteger(
instruction->operand(0)->operand(1), precomputed_analyses);
}
ParamIndexAndValue result;
if (Match(instruction, match::GetTupleElement().WithOperand(
0, match::Parameter().WithParameterNum(0)))) {
result.param_index = instruction->tuple_index();
}
std::optional<DynamicOrStaticInteger> integer_value =
GetInstructionValueAsInteger(instruction, precomputed_analyses);
result.value = std::move(integer_value);
if (!result.IsValid()) {
return std::nullopt;
}
return std::optional<ParamIndexAndValue>(std::move(result));
}
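// A while-loop condition of the form `lhs <cmp> rhs`, where each operand is
// described by an optional tuple index into the loop parameter and an
// optional static value.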
struct WhileCondComparison {
ComparisonDirection comparison_direction;
ParamIndexAndValue lhs;
ParamIndexAndValue rhs;
std::string ToString() const {
return absl::StrCat("WhileCondComparison{", "LHS:{", lhs.ToString(),
"},RHS:{", rhs.ToString(), "}}");
}
};
using WhileCondComparisonOrNoOp =
std::variant<WhileCondComparison, ParamIndexAndValue>;
std::optional<ParamIndexAndValue> ParseComparisonOperand(
const HloInstruction* operand,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (operand->opcode() == HloOpcode::kCopy ||
operand->opcode() == HloOpcode::kCopyStart ||
operand->opcode() == HloOpcode::kCopyDone) {
return ParseComparisonOperand(operand->operand(0), precomputed_analyses);
}
std::optional<int64_t> param_index;
if (Match(operand, match::GetTupleElement().WithOperand(
0, match::Parameter().WithParameterNum(0)))) {
param_index = operand->tuple_index();
}
std::optional<DynamicOrStaticInteger> operand_value =
GetInstructionValueAsInteger(operand, precomputed_analyses);
if (!param_index.has_value() && !operand_value.has_value()) {
return std::nullopt;
}
return ParamIndexAndValue{param_index, operand_value};
}
std::optional<WhileCondComparisonOrNoOp> PatternMatchLoopCondComparison(
const HloInstruction* comparison,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
CHECK_EQ(comparison->opcode(), HloOpcode::kCompare);
std::optional<ParamIndexAndValue> lhs =
ParseComparisonOperand(comparison->operand(0), precomputed_analyses);
std::optional<ParamIndexAndValue> rhs =
ParseComparisonOperand(comparison->operand(1), precomputed_analyses);
if (!lhs.has_value() || !rhs.has_value()) {
return std::nullopt;
}
return WhileCondComparison{comparison->comparison_direction(),
*std::move(lhs), *std::move(rhs)};
}
std::optional<WhileCondComparisonOrNoOp> PatternMatchLoopCondRoot(
const HloInstruction* loop_cond_root,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (loop_cond_root->opcode() == HloOpcode::kCopy) {
return PatternMatchLoopCondRoot(loop_cond_root->operand(0),
precomputed_analyses);
}
if (loop_cond_root->opcode() == HloOpcode::kCopyDone) {
return PatternMatchLoopCondRoot(loop_cond_root->operand(0)->operand(1),
precomputed_analyses);
}
if (loop_cond_root->opcode() == HloOpcode::kCompare) {
return PatternMatchLoopCondComparison(loop_cond_root, precomputed_analyses);
}
if (Match(loop_cond_root, match::GetTupleElement().WithOperand(
0, match::Parameter().WithParameterNum(0)))) {
if (loop_cond_root->shape().element_type() != PrimitiveType::PRED &&
loop_cond_root->shape().rank() != 0) {
return std::nullopt;
}
return ParamIndexAndValue{{loop_cond_root->tuple_index()}};
}
if (Match(loop_cond_root,
match::GetTupleElement().WithOperand(
0, match::Call().WithNumOperands(1).WithOperand(
0, match::Parameter().WithParameterNum(0))))) {
const HloInstruction* call_instruction = loop_cond_root->operand(0);
const HloComputation* to_apply = call_instruction->to_apply();
const HloInstruction* to_apply_root = to_apply->root_instruction();
if (Match(to_apply_root, match::Tuple())) {
return PatternMatchLoopCondRoot(
to_apply_root->operand(loop_cond_root->tuple_index()),
precomputed_analyses);
}
}
if (Match(loop_cond_root,
match::GetTupleElement().WithOperand(0, match::Tuple()))) {
const HloInstruction* new_cond_root =
loop_cond_root->operand(0)->operand(loop_cond_root->tuple_index());
return PatternMatchLoopCondRoot(new_cond_root, precomputed_analyses);
}
return std::nullopt;
}
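// Recognizes how the loop body updates the induction variable at
// `tuple_index`. Returns the signed per-iteration step: 0 when the variable
// is passed through unchanged, the constant addend (negated for subtract)
// for `i = i +/- c` or `i = c + i`, or a dynamic marker when the step is not
// a static constant.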
std::optional<DynamicOrStaticInteger> PatternMatchInductionVarUpdate(
const HloInstruction* induction_var_update, int64_t tuple_index,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (induction_var_update->opcode() == HloOpcode::kCopy) {
return PatternMatchInductionVarUpdate(induction_var_update->operand(0),
tuple_index, precomputed_analyses);
}
if (induction_var_update->opcode() == HloOpcode::kCopyDone) {
return PatternMatchInductionVarUpdate(
induction_var_update->operand(0)->operand(1), tuple_index,
precomputed_analyses);
}
std::optional<ParamIndexAndValue> update_param_index_and_value =
TryParsingInstructionAsParameterAndInteger(induction_var_update,
precomputed_analyses);
if (update_param_index_and_value.has_value()) {
if (update_param_index_and_value->param_index.has_value()) {
if (*update_param_index_and_value->param_index == tuple_index) {
VLOG(3) << "PatternMatchInductionVarUpdate, pattern: [induc_var].";
return DynamicOrStaticInteger{0};
} else {
VLOG(3)
<< "PatternMatchInductionVarUpdate, induction variable is set to "
"another parameter value. Parsed update: "
<< update_param_index_and_value->ToString();
return std::nullopt;
}
}
if (update_param_index_and_value->value.has_value() &&
!update_param_index_and_value->value->is_dynamic()) {
VLOG(3) << "PatternMatchInductionVarUpdate, induction variable is set to "
"a constant. Parsed update: "
<< update_param_index_and_value->ToString();
return std::nullopt;
}
}
if (induction_var_update->opcode() != HloOpcode::kAdd &&
induction_var_update->opcode() != HloOpcode::kSubtract) {
return std::nullopt;
}
bool negate_update = induction_var_update->opcode() == HloOpcode::kSubtract;
const HloInstruction* update_lhs = induction_var_update->operand(0);
VLOG(3) << "PatternMatchInductionVarUpdate, LHS: " << update_lhs->ToString();
std::optional<ParamIndexAndValue> update_lhs_param_index_and_value =
TryParsingInstructionAsParameterAndInteger(update_lhs,
precomputed_analyses);
const HloInstruction* update_rhs = induction_var_update->operand(1);
VLOG(3) << "PatternMatchInductionVarUpdate, RHS: " << update_rhs->ToString();
std::optional<ParamIndexAndValue> update_rhs_param_index_and_value =
TryParsingInstructionAsParameterAndInteger(update_rhs,
precomputed_analyses);
if (!update_lhs_param_index_and_value.has_value() ||
!update_lhs_param_index_and_value->value.has_value() ||
!update_rhs_param_index_and_value.has_value() ||
!update_rhs_param_index_and_value->value.has_value()) {
VLOG(3) << "PatternMatchInductionVarUpdate, failed to parse operands. "
"Induction var update instruction: "
<< induction_var_update->ToString();
return std::nullopt;
}
VLOG(3) << "update_lhs: " << update_lhs->ToString();
VLOG(3) << "update_rhs: " << update_rhs->ToString();
if (update_lhs_param_index_and_value->param_index.has_value() &&
*update_lhs_param_index_and_value->param_index == tuple_index &&
update_lhs_param_index_and_value->value->is_dynamic()) {
if (update_rhs_param_index_and_value->value->is_dynamic()) {
return update_rhs_param_index_and_value->value;
}
int64_t update_value =
*update_rhs_param_index_and_value->value->static_value;
return negate_update
? DynamicOrStaticInteger{-update_value}
: DynamicOrStaticInteger{update_value};
}
if (update_rhs_param_index_and_value->param_index.has_value() &&
*update_rhs_param_index_and_value->param_index == tuple_index &&
update_rhs_param_index_and_value->value->is_dynamic() && !negate_update) {
return update_lhs_param_index_and_value->value;
}
VLOG(3) << "Failed to pattern match induction variable update.";
return std::nullopt;
}
std::optional<DynamicOrStaticInteger>
PatternMatchInductionVarUpdateFromLoopBodyRoot(
const HloInstruction* loop_body_root, int64_t tuple_index,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (loop_body_root->opcode() != HloOpcode::kTuple ||
loop_body_root->operand_count() <= tuple_index) {
return std::nullopt;
}
const HloInstruction* induction_var_update =
loop_body_root->operand(tuple_index);
return PatternMatchInductionVarUpdate(induction_var_update, tuple_index,
precomputed_analyses);
}
std::optional<bool> PatternMatchLoopCondVarOverride(
const HloInstruction* loop_body_root, int64_t tuple_index) {
if (!Match(loop_body_root, match::Tuple()) ||
loop_body_root->operand_count() <= tuple_index) {
return std::nullopt;
}
const HloInstruction* cond_var_override =
loop_body_root->operand(tuple_index);
return GetInstructionStaticValueAsBool(cond_var_override);
}
std::optional<DynamicOrStaticInteger> EvaluateWhileLoopParamInitValue(
const HloInstruction* param_instruction, int64_t tuple_index) {
if (param_instruction->opcode() != HloOpcode::kTuple) {
return std::nullopt;
}
const HloInstruction* element_instruction =
param_instruction->operand(tuple_index);
return GetInstructionValueAsInteger(element_instruction,
                                      /*precomputed_analyses=*/{});
}
}
namespace internal {
constexpr absl::string_view kEvalErrorDetailUrl = "EvalErrorDetailUrl";
std::optional<EvalErrorDetail> ParseEvalErrorDetail(const absl::Status& error) {
auto error_detail = error.GetPayload(kEvalErrorDetailUrl);
if (!error_detail.has_value() || error_detail->empty()) {
return std::nullopt;
}
return static_cast<EvalErrorDetail>(
absl::little_endian::Load32(error_detail->Flatten().data()));
}
}
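// Handles a while condition that simply forwards a boolean element of the
// loop parameter: the loop never runs if the element starts false, runs
// exactly once if the body overwrites it with false, and is an infinite loop
// (trip_count = -1) if the body keeps it true.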
std::optional<ParsedWhileLoop> HandleNoopLoopCondition(
const ParamIndexAndValue& parameter_index_and_value,
const HloInstruction* while_operand, const HloComputation* while_body) {
CHECK(parameter_index_and_value.param_index.has_value());
int64_t loop_cond_var_index = *parameter_index_and_value.param_index;
std::optional<DynamicOrStaticInteger> noop_value =
EvaluateWhileLoopParamInitValue(while_operand, loop_cond_var_index);
if (noop_value.has_value()) {
if (noop_value->is_dynamic()) {
return kParsedDynamicWhileLoop;
} else if (*noop_value->static_value == 0) {
return ParsedWhileLoop{
          ParsedStaticWhileLoop{/*trip_count=*/0,
                                /*induction_var_index=*/loop_cond_var_index,
                                /*induction_var_init_value=*/0,
                                /*step_size=*/0,
                                /*loop_bound=*/0}};
}
std::optional<bool> updated_loop_cond_var = PatternMatchLoopCondVarOverride(
while_body->root_instruction(), loop_cond_var_index);
if (updated_loop_cond_var.has_value()) {
if (!*updated_loop_cond_var) {
return ParsedWhileLoop{
            ParsedStaticWhileLoop{/*trip_count=*/1,
                                  /*induction_var_index=*/loop_cond_var_index,
                                  /*induction_var_init_value=*/0,
                                  /*step_size=*/1,
                                  /*loop_bound=*/1}};
} else {
return ParsedWhileLoop{
            ParsedStaticWhileLoop{/*trip_count=*/-1,
                                  /*induction_var_index=*/loop_cond_var_index,
                                  /*induction_var_init_value=*/0,
                                  /*step_size=*/0,
                                  /*loop_bound=*/1}};
}
}
}
return std::nullopt;
}
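// Number of iterations of `for (i = init; i < bound; i += update)`, or with
// `<=` when `comparison_with_equal` is set. Example: init=0, bound=10,
// update=3 under `<` gives (10 + 3 - 1) / 3 = 4 iterations (i = 0, 3, 6, 9);
// under `<=`, one extra iteration is added when (bound - init) is an exact
// multiple of update, since the bound itself is then visited.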
int64_t ComputeTripCountFromComparison(int64_t init, int64_t bound,
int64_t update,
bool comparison_with_equal) {
if (comparison_with_equal && init > bound) {
return 0;
}
if (!comparison_with_equal && init >= bound) {
return 0;
}
int64_t distance = bound - init;
int64_t trip_count = (distance + update - 1) / update;
CHECK_GE(trip_count, 0);
if (comparison_with_equal && (bound - init) % update == 0) {
trip_count += 1;
}
return trip_count;
}
std::optional<ParsedWhileLoop> HandleStaticLoopComparison(
int64_t lhs, int64_t rhs, Comparison::Direction comparison_direction) {
if ((comparison_direction == Comparison::Direction::kLt && lhs < rhs) ||
(comparison_direction == Comparison::Direction::kLe && lhs <= rhs) ||
(comparison_direction == Comparison::Direction::kGt && lhs > rhs) ||
(comparison_direction == Comparison::Direction::kGe && lhs >= rhs) ||
(comparison_direction == Comparison::Direction::kEq && lhs == rhs) ||
(comparison_direction == Comparison::Direction::kNe && lhs != rhs)) {
    return ParsedWhileLoop{ParsedStaticWhileLoop{/*trip_count=*/-1,
                                                 /*induction_var_index=*/-1,
                                                 /*induction_var_init_value=*/0,
                                                 /*step_size=*/0,
                                                 /*loop_bound=*/1}};
}
  return ParsedWhileLoop{ParsedStaticWhileLoop{/*trip_count=*/0,
                                               /*induction_var_index=*/-1,
                                               /*induction_var_init_value=*/0,
                                               /*step_size=*/0,
                                               /*loop_bound=*/0}};
}
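// Statically analyzes a kWhile instruction. Three shapes of condition are
// recognized: a no-op condition that forwards a boolean tuple element, a
// comparison between two static constants, and the canonical counted loop in
// which exactly one comparison operand is the induction variable. In the
// last case the trip count is derived from the induction variable's initial
// value, the loop bound, and the per-iteration step.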
std::optional<ParsedWhileLoop> PatternMatchParseWhileLoop(
const HloInstruction* while_op,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
VLOG(3) << "PatternMatchParseWhileLoop, while_op: " << while_op->name();
const HloComputation* while_cond = while_op->while_condition();
const HloComputation* while_body = while_op->while_body();
const HloInstruction* while_operand = while_op->operand(0);
std::optional<WhileCondComparisonOrNoOp> loop_comparison_or_noop =
PatternMatchLoopCondRoot(while_cond->root_instruction(),
precomputed_analyses);
if (!loop_comparison_or_noop.has_value()) {
return std::nullopt;
}
if (loop_comparison_or_noop->index() == 1) {
return HandleNoopLoopCondition(
std::get<ParamIndexAndValue>(*loop_comparison_or_noop), while_operand,
while_body);
}
CHECK_EQ(loop_comparison_or_noop->index(), 0);
WhileCondComparison loop_comparison =
std::get<WhileCondComparison>(*loop_comparison_or_noop);
CHECK(loop_comparison.lhs.IsValid() && loop_comparison.rhs.IsValid());
if (while_operand->opcode() != HloOpcode::kTuple) {
return std::nullopt;
}
if (!loop_comparison.lhs.value.has_value() ||
!loop_comparison.rhs.value.has_value()) {
return std::nullopt;
}
CHECK(loop_comparison.lhs.value.has_value());
CHECK(loop_comparison.rhs.value.has_value());
VLOG(3) << loop_comparison.ToString();
if (loop_comparison.lhs.value->is_dynamic() &&
loop_comparison.rhs.value->is_dynamic()) {
VLOG(3) << "Both operands of the loop condition comparison are dynamic.";
return std::nullopt;
}
CHECK(!loop_comparison.lhs.value->is_dynamic() ||
!loop_comparison.rhs.value->is_dynamic());
if (!loop_comparison.lhs.value->is_dynamic() &&
!loop_comparison.rhs.value->is_dynamic()) {
int64_t lhs_value = *loop_comparison.lhs.value->static_value;
int64_t rhs_value = *loop_comparison.rhs.value->static_value;
Comparison::Direction comparison_direction =
loop_comparison.comparison_direction;
return HandleStaticLoopComparison(lhs_value, rhs_value,
comparison_direction);
}
std::optional<DynamicOrStaticInteger> induction_var_init;
std::optional<DynamicOrStaticInteger> induction_var_update;
bool lhs_is_induction_var = true;
if (loop_comparison.lhs.value->is_dynamic()) {
if (loop_comparison.lhs.param_index.has_value()) {
VLOG(3) << "Comparison LHS is induction variable.";
induction_var_init = EvaluateWhileLoopParamInitValue(
while_operand, *loop_comparison.lhs.param_index);
induction_var_update = PatternMatchInductionVarUpdateFromLoopBodyRoot(
while_body->root_instruction(), *loop_comparison.lhs.param_index,
precomputed_analyses);
lhs_is_induction_var = true;
}
} else {
CHECK(loop_comparison.rhs.value->is_dynamic());
if (loop_comparison.rhs.param_index.has_value()) {
VLOG(3) << "Comparison RHS is induction variable.";
induction_var_init = EvaluateWhileLoopParamInitValue(
while_operand, *loop_comparison.rhs.param_index);
induction_var_update = PatternMatchInductionVarUpdateFromLoopBodyRoot(
while_body->root_instruction(), *loop_comparison.rhs.param_index,
precomputed_analyses);
lhs_is_induction_var = false;
}
}
if (!induction_var_init.has_value() || !induction_var_update.has_value()) {
return std::nullopt;
}
VLOG(3) << "induction_var_init: " << induction_var_init->ToString();
VLOG(3) << "induction_var_update: " << induction_var_update->ToString();
if (induction_var_init->is_dynamic() || induction_var_update->is_dynamic()) {
return kParsedDynamicWhileLoop;
}
int64_t init_value = *induction_var_init->static_value;
int64_t update_value = *induction_var_update->static_value;
Comparison::Direction comparison_direction =
loop_comparison.comparison_direction;
ParsedWhileLoop parsed_static_while_loop = ParsedWhileLoop{
      ParsedStaticWhileLoop{/*trip_count=*/0,
                            /*induction_var_index=*/-1,
                            /*induction_var_init_value=*/init_value,
                            /*step_size=*/update_value,
                            /*loop_bound=*/-1}};
if (lhs_is_induction_var) {
CHECK(loop_comparison.rhs.value.has_value() &&
!loop_comparison.rhs.value->is_dynamic());
int64_t bound = *loop_comparison.rhs.value->static_value;
parsed_static_while_loop.static_while_loop->induction_var_index =
*loop_comparison.lhs.param_index;
parsed_static_while_loop.static_while_loop->loop_bound = bound;
if (update_value > 0 &&
(comparison_direction == Comparison::Direction::kLt ||
comparison_direction == Comparison::Direction::kLe)) {
int64_t trip_count = ComputeTripCountFromComparison(
init_value, bound, update_value,
comparison_direction == Comparison::Direction::kLe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
if (update_value < 0 &&
(comparison_direction == Comparison::Direction::kGt ||
comparison_direction == Comparison::Direction::kGe)) {
int64_t trip_count = ComputeTripCountFromComparison(
bound, init_value, -update_value,
comparison_direction == Comparison::Direction::kGe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
return std::nullopt;
}
CHECK(loop_comparison.lhs.value.has_value() &&
!loop_comparison.lhs.value->is_dynamic());
int64_t bound = *loop_comparison.lhs.value->static_value;
parsed_static_while_loop.static_while_loop->induction_var_index =
*loop_comparison.rhs.param_index;
parsed_static_while_loop.static_while_loop->loop_bound = bound;
if (update_value > 0 &&
(comparison_direction == Comparison::Direction::kGt ||
comparison_direction == Comparison::Direction::kGe)) {
int64_t trip_count = ComputeTripCountFromComparison(
init_value, bound, update_value,
comparison_direction == Comparison::Direction::kGe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
if (update_value < 0 &&
(comparison_direction == Comparison::Direction::kLt ||
comparison_direction == Comparison::Direction::kLe)) {
int64_t trip_count = ComputeTripCountFromComparison(
bound, init_value, -update_value,
comparison_direction == Comparison::Direction::kLe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
return std::nullopt;
}
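// One HloEvaluatorTypedVisitor is instantiated per array element type.
// Narrow types are evaluated in a wider element type: signed integers in
// int64_t, unsigned integers in uint64_t, and sub-32-bit floats in float.
// TUPLE, OPAQUE_TYPE and TOKEN get visitors that report Unimplemented.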
HloEvaluator::HloEvaluator(int64_t max_loop_iterations)
: max_loop_iterations_(max_loop_iterations) {
for (int i = PrimitiveType_MIN; i < PrimitiveType_ARRAYSIZE; ++i) {
if (!primitive_util::IsArrayType(PrimitiveType{i})) {
continue;
}
primitive_util::PrimitiveTypeSwitch<void>(
[&](auto primitive_type) {
if constexpr (primitive_util::IsArrayType(primitive_type)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type>;
if constexpr (primitive_util::IsSignedIntegralType(
primitive_type)) {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT, int64_t>>(
this);
} else if constexpr (primitive_util::IsUnsignedIntegralType(
primitive_type)) {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT, uint64_t>>(
this);
} else if constexpr (primitive_util::IsFloatingPointType(
primitive_type) &&
sizeof(NativeT) < sizeof(float)) {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT, float>>(
this);
} else {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT>>(this);
}
}
},
PrimitiveType{i});
}
typed_visitors_[TUPLE] =
std::make_unique<ConstFunctionVisitor>([](const HloInstruction*) {
return Unimplemented(
"HloEvaluatorTypedVisitor: unhandled primitive type: TUPLE.");
});
typed_visitors_[OPAQUE_TYPE] =
std::make_unique<ConstFunctionVisitor>([](const HloInstruction*) {
return Unimplemented(
"HloEvaluatorTypedVisitor: unhandled primitive type: OPAQUE_TYPE.");
});
typed_visitors_[TOKEN] =
std::make_unique<ConstFunctionVisitor>([](const HloInstruction*) {
return Unimplemented(
"HloEvaluatorTypedVisitor: unhandled primitive type: TOKEN.");
});
}
absl::StatusOr<Literal> HloEvaluator::Evaluate(
const HloComputation& computation,
absl::Span<const Literal* const> arg_literals) {
CHECK(computation.parent() != nullptr);
XLA_VLOG_LINES(
2, "HloEvaluator::Evaluate computation:\n" + computation.ToString());
OnEvaluateComputation(computation);
if (arg_literals.size() != computation.num_parameters()) {
return InvalidArgument(
"Expected %d argument%s, but got %d.", computation.num_parameters(),
computation.num_parameters() == 1 ? "" : "s", arg_literals.size());
}
for (int64_t i = 0; i < arg_literals.size(); ++i) {
const auto& computation_shape =
computation.parameter_instruction(i)->shape();
const auto& arg_shape = arg_literals[i]->shape();
if (!Shape::Equal().MinorToMajorOnlyInLayout()(computation_shape,
arg_shape)) {
return InvalidArgument(
"Shape mismatch at parameter %d. Computation expected %s, but arg "
"was %s.",
i, ShapeUtil::HumanStringWithLayout(computation_shape),
ShapeUtil::HumanStringWithLayout(arg_shape));
}
}
evaluated_.clear();
arg_literals_.clear();
call_graph_cache_.reset();
tuple_points_to_analysis_cache_.reset();
for (const auto& literal_ptr : arg_literals) {
arg_literals_.push_back(&*literal_ptr);
}
if (computation.parent()->config().seed()) {
seed_ = computation.parent()->config().seed();
} else {
static std::atomic<uint64_t> global_seed{std::random_device()()};
seed_ = global_seed.fetch_add(1);
}
engine_.seed(seed_);
TF_RETURN_IF_ERROR(computation.Accept(this));
const Literal& result =
GetEvaluatedLiteralFor(computation.root_instruction());
if (VLOG_IS_ON(100)) {
for (const HloInstruction* instr : computation.instructions()) {
VLOG(100) << instr->name() << " = " << GetEvaluatedLiteralFor(instr);
}
}
if (!result.IsKnown()) {
return MakeEvalErrorDueToParamOrInfeed(*computation.root_instruction());
}
return result.Clone();
}
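// Evaluates a single instruction, optionally recursing into non-constant
// operands (partial evaluation). The absl::Cleanup guarantees that
// enable_partial_evaluation_ is reset to false on every exit path.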
absl::StatusOr<Literal> HloEvaluator::Evaluate(
const HloInstruction* instruction, PrecomputedAnalyses precomputed_analyses,
bool recursively_evaluate_nonconstant_operands) {
arg_literals_.clear();
evaluated_.clear();
call_graph_cache_.reset();
tuple_points_to_analysis_cache_.reset();
auto enable_partial_evaluation_cleanup =
absl::MakeCleanup([this] { enable_partial_evaluation_ = false; });
enable_partial_evaluation_ = recursively_evaluate_nonconstant_operands;
TF_RETURN_IF_ERROR(
EvaluateInternal(instruction, precomputed_analyses, {},
recursively_evaluate_nonconstant_operands));
const Literal& result = GetEvaluatedLiteralFor(instruction);
if (!result.IsKnown()) {
return MakeEvalErrorDueToParamOrInfeed(*instruction);
}
return result.Clone();
}
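// Non-status convenience wrapper around Evaluate(): returns false instead of
// an error status, logging the failure at VLOG(1). A hypothetical caller
// (assuming the trailing flag is defaulted in the header):
//   Literal value;
//   if (evaluator.TryEvaluate(instr, &value)) { /* use value */ }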
bool HloEvaluator::TryEvaluate(const HloInstruction* instruction,
Literal* result,
bool recursively_evaluate_nonconstant_operands) {
CHECK(result != nullptr);
auto result_or = Evaluate(instruction, {},
recursively_evaluate_nonconstant_operands);
if (!result_or.ok()) {
VLOG(1) << "TryEvaluate failed:" << result_or.status();
return false;
}
*result = std::move(result_or).value();
return true;
}
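// Evaluates `instruction` as if the mapped operands were replaced by the
// given literals: substituted operands become constant clones, the
// instruction is re-created over the cloned operands, and that detached
// clone is evaluated.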
absl::StatusOr<Literal> HloEvaluator::EvaluateWithSubstitutions(
const HloInstruction* instruction,
const absl::flat_hash_map<const HloInstruction*, const LiteralBase*>&
substitutions) {
std::vector<std::unique_ptr<HloInstruction>> owned_operands;
for (const HloInstruction* operand : instruction->operands()) {
auto it = substitutions.find(operand);
if (it == substitutions.end()) {
owned_operands.push_back(operand->Clone());
} else {
owned_operands.push_back(
HloInstruction::CreateConstant(it->second->Clone()));
}
}
std::vector<HloInstruction*> operands;
operands.reserve(owned_operands.size());
for (auto& operand : owned_operands) {
operands.push_back(operand.get());
}
std::unique_ptr<HloInstruction> cloned_instruction =
instruction->CloneWithNewOperands(instruction->shape(), operands);
auto result = Evaluate(cloned_instruction.get());
return result;
}
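// The EvaluateElementwise*Op and EvaluateDotOp helpers below share one
// pattern: wrap the input literals in temporary constant instructions, build
// a detached instruction of the requested kind over them, and evaluate it.
// A hypothetical caller:
//   TF_ASSIGN_OR_RETURN(Literal sum, evaluator.EvaluateElementwiseBinaryOp(
//                                        HloOpcode::kAdd, lhs, rhs));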
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseBinaryOp(
HloOpcode opcode, const Literal& lhs, const Literal& rhs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateBinary(lhs.shape(), opcode, lhs_instr.get(),
rhs_instr.get());
auto result = Evaluate(cloned_instruction.get());
return result;
}
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseTernaryOp(
HloOpcode opcode, const Literal& lhs, const Literal& rhs,
const Literal& ehs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
std::unique_ptr<HloInstruction> ehs_instr =
HloInstruction::CreateConstant(ehs.Clone());
TF_ASSIGN_OR_RETURN(auto output_shape,
ShapeInference::InferTernaryOpShape(
opcode, lhs.shape(), rhs.shape(), ehs.shape()));
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateTernary(output_shape, opcode, lhs_instr.get(),
rhs_instr.get(), ehs_instr.get());
return Evaluate(cloned_instruction.get());
}
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseCompareOp(
ComparisonDirection direction, const Literal& lhs, const Literal& rhs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateCompare(
ShapeUtil::ChangeElementType(lhs.shape(), PRED), lhs_instr.get(),
rhs_instr.get(), direction);
auto result = Evaluate(cloned_instruction.get());
return result;
}
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseUnaryOp(
HloOpcode opcode, const Literal& operand) {
std::unique_ptr<HloInstruction> operand_instr =
HloInstruction::CreateConstant(operand.Clone());
TF_ASSIGN_OR_RETURN(Shape inferred_shape, ShapeInference::InferUnaryOpShape(
opcode, operand.shape()));
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateUnary(inferred_shape, opcode, operand_instr.get());
auto result = Evaluate(cloned_instruction.get());
return result;
}
absl::StatusOr<Literal> HloEvaluator::EvaluateDotOp(
const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config, const Literal& lhs,
const Literal& rhs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
TF_ASSIGN_OR_RETURN(
Shape dot_shape,
ShapeInference::InferDotOpShape(lhs.shape(), rhs.shape(), dim_numbers,
std::nullopt));
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateDot(dot_shape, lhs_instr.get(), rhs_instr.get(),
dim_numbers, precision_config);
return Evaluate(cloned_instruction.get());
}
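// Attempts to recover a parameter's value from its unique caller. Only
// kWhile and kCall callers are handled; for kWhile, the caller's operand is
// used only if tuple-points-to analysis shows that the requested tuple
// element flows unchanged into the body's root, since otherwise its value
// would differ across iterations.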
absl::Status HloEvaluator::EvaluateParameterFromCallerArgument(
const HloInstruction* parameter, const ShapeIndex& shape_index,
PrecomputedAnalyses analyses) {
CHECK(!evaluated_.contains(parameter));
const HloComputation* parent_computation = parameter->parent();
std::vector<HloInstruction*> computation_callers =
analyses.call_graph->GetComputationCallers(parent_computation);
if (computation_callers.size() != 1) {
return tsl::errors::FailedPrecondition(
"The computation ", parent_computation->name(), " is called by ",
computation_callers.size(),
" callers and thus its argument value "
"cannot be determined statically.");
}
const HloInstruction* computation_caller = computation_callers[0];
const HloInstruction* caller_operand = computation_caller->operand(0);
if (computation_caller->opcode() != HloOpcode::kWhile &&
computation_caller->opcode() != HloOpcode::kCall) {
return tsl::errors::FailedPrecondition(
"The computation ", parent_computation->name(), " is called by ",
"instruction ", computation_caller->name(),
", which is not yet supported.");
}
if (computation_caller->opcode() == HloOpcode::kWhile) {
HloComputation* while_body = computation_caller->while_body();
TF_ASSIGN_OR_RETURN(
const LogicalBuffer* logical_buffer,
analyses.tuple_points_to->GetBufferDefinedAt(
while_body->parameter_instruction(parameter->parameter_number()),
shape_index));
const TuplePointsToAnalysis::BufferAliasVector& buffer_aliases =
analyses.tuple_points_to->GetBufferAliases(*logical_buffer);
bool unchanged_in_return = false;
for (const BufferAlias& buffer_alias : buffer_aliases) {
if (buffer_alias.instruction() == while_body->root_instruction() &&
buffer_alias.index() == shape_index) {
unchanged_in_return = true;
}
}
if (!unchanged_in_return) {
return MakeEvalErrorDueToParamOrInfeed(*parameter);
}
}
TF_RETURN_IF_ERROR(
EvaluateInternal(caller_operand, analyses, shape_index, true));
const Literal& caller_operand_literal =
GetEvaluatedLiteralFor(caller_operand);
evaluated_[parameter] =
Literal::CreateFromShapeWithUnknownLeafArrays(parameter->shape());
TF_RETURN_IF_ERROR(evaluated_[parameter].CopyFrom(
caller_operand_literal, shape_index,
shape_index));
return absl::OkStatus();
}
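// Reads the first element of each evaluated start-index operand as int64_t,
// whatever its integral element type.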
std::vector<int64_t> HloEvaluator::GetS64Indices(
absl::Span<HloInstruction* const> start_indices) {
auto get_first_s64 = [&](const Literal& index) -> int64_t {
return primitive_util::PrimitiveTypeSwitch<int64_t>(
[&](auto primitive_type_constant) -> int64_t {
if constexpr (primitive_util::IsIntegralType(
primitive_type_constant)) {
return static_cast<int64_t>(
index.GetFirstElement<NativeTypeOf<primitive_type_constant>>());
}
LOG(FATAL) << "GetS64Indices: unhandled primitive type for "
<< PrimitiveType_Name(index.shape().element_type());
},
index.shape().element_type());
};
std::vector<int64_t> start;
start.reserve(start_indices.size());
for (HloInstruction* index : start_indices) {
start.push_back(get_first_s64(GetEvaluatedLiteralFor(index)));
}
return start;
}
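// Computes per-dimension strides (in elements) for `shape`, walking
// minor-to-major so that v[dim] is the linearized step size of `dim`.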
DimensionVector HloEvaluator::MakeDimMultipliers(const Shape& shape) {
DimensionVector v(shape.rank());
int64_t scale = 1;
for (auto dim : LayoutUtil::MinorToMajor(shape)) {
v[dim] = scale;
scale *= shape.dimensions(dim);
}
return v;
}
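// Recursive driver behind both Evaluate() overloads. Without partial
// evaluation it requires all operands to be constants. With it,
// kGetTupleElement and kTuple recurse only into the tuple element selected
// by shape_index, kParameter is resolved through the call graph, and any
// other instruction with an unknown operand short-circuits to an
// unknown-leaf-array literal, except for copy/async/while ops, which may
// still propagate partial values.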
absl::Status HloEvaluator::EvaluateInternal(
const HloInstruction* instruction, PrecomputedAnalyses precomputed_analyses,
const ShapeIndex& shape_index,
bool recursively_evaluate_nonconstant_operands) {
if (IsAlreadyEvaluated(instruction, shape_index)) {
return absl::OkStatus();
}
if (!recursively_evaluate_nonconstant_operands) {
if (!hlo_query::AllOperandsAreConstants(*instruction)) {
return absl::FailedPreconditionError(
absl::StrCat("Not all operands are constants. Instruction: ",
instruction->ToString()));
}
} else {
if (instruction->opcode() == HloOpcode::kGetTupleElement) {
ShapeIndex new_shape_index = shape_index;
new_shape_index.push_front(instruction->tuple_index());
TF_RETURN_IF_ERROR(EvaluateInternal(
instruction->operand(0), precomputed_analyses, new_shape_index,
true));
} else if (instruction->opcode() == HloOpcode::kTuple &&
!shape_index.empty()) {
ShapeIndex new_shape_index = shape_index;
int64_t tuple_index = new_shape_index.front();
new_shape_index.pop_front();
TF_RETURN_IF_ERROR(
EvaluateInternal(instruction->operand(tuple_index),
precomputed_analyses, new_shape_index,
true));
} else if (instruction->opcode() == HloOpcode::kParameter) {
CallGraph* call_graph =
(precomputed_analyses.call_graph != nullptr)
? precomputed_analyses.call_graph
: std::invoke([this, instruction]() -> CallGraph* {
call_graph_cache_ =
CallGraph::Build(instruction->GetModule());
return call_graph_cache_.get();
});
TuplePointsToAnalysis* tuple_points_to_analysis =
(precomputed_analyses.tuple_points_to != nullptr)
? precomputed_analyses.tuple_points_to
: std::invoke([this, instruction]() -> TuplePointsToAnalysis* {
absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>>
tuple_points_to_analysis =
TuplePointsToAnalysis::Run(instruction->GetModule());
if (!tuple_points_to_analysis.ok()) {
return nullptr;
}
tuple_points_to_analysis_cache_ =
*std::move(tuple_points_to_analysis);
return tuple_points_to_analysis_cache_.get();
});
if (call_graph && tuple_points_to_analysis) {
absl::Status argument_eval_status = EvaluateParameterFromCallerArgument(
instruction, shape_index, {tuple_points_to_analysis, call_graph});
if (!argument_eval_status.ok()) {
VLOG(4) << "Failed to evaluate parameter " << instruction->name()
<< " from caller. Reason: " << argument_eval_status.message();
} else {
VLOG(4) << "Successfully evaluated parameter: "
<< instruction->name();
}
}
} else {
for (HloInstruction* operand : instruction->operands()) {
TF_RETURN_IF_ERROR(EvaluateInternal(
operand, precomputed_analyses, {},
true));
if ((!GetEvaluatedLiteralFor(operand).IsKnown() &&
instruction->opcode() != HloOpcode::kCopy &&
instruction->opcode() != HloOpcode::kCopyStart &&
instruction->opcode() != HloOpcode::kCopyDone &&
instruction->opcode() != HloOpcode::kAsyncStart &&
instruction->opcode() != HloOpcode::kAsyncUpdate &&
instruction->opcode() != HloOpcode::kAsyncDone &&
instruction->opcode() != HloOpcode::kWhile)) {
evaluated_[instruction] =
Literal::CreateFromShapeWithUnknownLeafArrays(
instruction->shape());
return absl::OkStatus();
}
}
}
}
visitor_shape_index_ = shape_index;
TF_RETURN_IF_ERROR(Preprocess(instruction));
TF_RETURN_IF_ERROR(instruction->Visit(this));
TF_RETURN_IF_ERROR(Postprocess(instruction));
return absl::OkStatus();
}
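// A bitcast reinterprets the operand's bytes in the result shape, so the raw
// buffer is memcpy'd; the operand must be at least as large as the result.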
absl::Status HloEvaluator::HandleBitcast(const HloInstruction* bitcast) {
const Literal& operand_literal = GetEvaluatedLiteralFor(bitcast->operand(0));
Literal result(bitcast->shape());
TF_RET_CHECK(operand_literal.size_bytes() >= result.size_bytes());
memcpy(result.untyped_data(), operand_literal.untyped_data(),
result.size_bytes());
evaluated_[bitcast] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleBitcastConvert(const HloInstruction* convert) {
const HloInstruction* operand = convert->operand(0);
TF_ASSIGN_OR_RETURN(
Literal result,
GetEvaluatedLiteralFor(operand).BitcastConvert(convert->shape()));
evaluated_[convert] = std::move(result);
return absl::OkStatus();
}
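// get-dimension-size returns the dynamic size recorded by
// DynamicDimensionInference when one exists, otherwise the static dimension
// size as an S32 scalar.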
absl::Status HloEvaluator::HandleGetDimensionSize(
const HloInstruction* get_dimension_size) {
const HloInstruction* operand = get_dimension_size->operand(0);
int64_t dim = get_dimension_size->dimension();
if (dynamic_dimension_inference_ == nullptr) {
return InvalidArgument(
"Evaluator cannot evaluate get_dimension_size without "
"set_dynamic_dimension_inference.");
}
const HloInstruction* dynamic_size =
dynamic_dimension_inference_->GetDynamicSize(operand, {}, dim);
if (dynamic_size != nullptr) {
evaluated_[get_dimension_size] =
GetEvaluatedLiteralFor(dynamic_size).Clone();
return absl::OkStatus();
}
  const Shape& shape = operand->shape();
  Literal output(ShapeUtil::MakeShape(S32, {}));
  output.PopulateWithValue(static_cast<int32_t>(shape.dimensions(dim)));
evaluated_[get_dimension_size] = std::move(output);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleSetDimensionSize(
const HloInstruction* set_dimension_size) {
const Literal& operand_literal =
GetEvaluatedLiteralFor(set_dimension_size->operand(0));
Literal result(set_dimension_size->shape());
memcpy(result.untyped_data(), operand_literal.untyped_data(),
operand_literal.size_bytes());
const Literal& size_literal =
GetEvaluatedLiteralFor(set_dimension_size->operand(1));
result.SetDynamicSize(set_dimension_size->dimension(),
size_literal.Get<int32_t>({}));
evaluated_[set_dimension_size] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleParameter(const HloInstruction* parameter) {
if (!IsAlreadyEvaluated(parameter, visitor_shape_index_)) {
if (!enable_partial_evaluation_) {
return tsl::errors::FailedPrecondition(
"Failed to evaluate instruction since its operands are unknown "
"or undetermined and partial evaluation is not enabled.");
}
evaluated_[parameter] =
Literal::CreateFromShapeWithUnknownLeafArrays(parameter->shape());
return absl::OkStatus();
}
if (!arg_literals_.empty()) {
CHECK_LT(parameter->parameter_number(), arg_literals_.size());
#ifndef NDEBUG
const Literal* input_literal = arg_literals_[parameter->parameter_number()];
VLOG(2) << "Parameter evaluated to: " << input_literal->ToString();
DCHECK(Shape::Equal().MinorToMajorOnlyInLayout()(parameter->shape(),
input_literal->shape()))
<< "parameter shape is: "
<< ShapeUtil::HumanStringWithLayout(parameter->shape())
<< ", but input literal shape is: "
<< ShapeUtil::HumanStringWithLayout(input_literal->shape());
#endif
}
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleInfeed(const HloInstruction* infeed) {
if (!enable_partial_evaluation_) {
return tsl::errors::FailedPrecondition(
"Failed to evaluate instruction since its operands are unknown "
"or undetermined and partial evaluation is not enabled.");
}
evaluated_[infeed] =
Literal::CreateFromShapeWithUnknownLeafArrays(infeed->shape());
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleConstant(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleReshape(const HloInstruction* reshape) {
TF_ASSIGN_OR_RETURN(evaluated_[reshape],
GetEvaluatedLiteralFor(reshape->operand(0))
.Reshape(reshape->shape().dimensions()));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleTranspose(const HloInstruction* transpose) {
evaluated_[transpose] = GetEvaluatedLiteralFor(transpose->operand(0))
.Transpose(transpose->dimensions());
return absl::OkStatus();
}
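// Concatenation: the result takes the first operand's dimensions with the
// concat dimension summed across operands, then each operand is block-copied
// at an increasing offset along that dimension.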
absl::Status HloEvaluator::HandleConcatenate(
const HloInstruction* concatenate) {
absl::Span<HloInstruction* const> operands(concatenate->operands());
const Shape& reference_shape = operands[0]->shape();
CHECK(reference_shape.IsArray());
const int64_t rank = reference_shape.rank();
const int64_t concat_dim = concatenate->dimensions()[0];
CHECK_GE(concat_dim, 0);
CHECK_LT(concat_dim, rank);
DimensionVector concat_dimensions(reference_shape.dimensions().begin(),
reference_shape.dimensions().end());
for (int64_t i = 1; i < operands.size(); ++i) {
const Shape& operand_shape = operands[i]->shape();
CHECK(operand_shape.IsArray());
concat_dimensions[concat_dim] +=
ShapeUtil::GetDimension(operand_shape, concat_dim);
}
auto result_literal = LiteralUtil::CreateFromDimensions(
reference_shape.element_type(), concat_dimensions);
DimensionVector source_indices(rank, 0);
DimensionVector dest_indices(concat_dimensions.size(), 0);
for (auto operand : operands) {
const Shape& operand_shape = operand->shape();
TF_RETURN_IF_ERROR(result_literal.CopySliceFrom(
GetEvaluatedLiteralFor(operand), source_indices, dest_indices,
operand_shape.dimensions()));
dest_indices[concat_dim] +=
ShapeUtil::GetDimension(operand_shape, concat_dim);
}
evaluated_[concatenate] = std::move(result_literal);
return absl::OkStatus();
}
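// HandleIsFinite/HandleReal/HandleImag/HandleComplex dispatch on the operand
// element type and apply the obvious elementwise definition: the real part
// of a float is itself, its imaginary part is zero, and complex pairs a real
// and an imaginary operand elementwise.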
absl::Status HloEvaluator::HandleIsFinite(const HloInstruction* is_finite) {
auto operand = is_finite->operand(0);
auto elem_ty = operand->shape().element_type();
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or = ElementWiseUnaryOpImpl<bool, NativeT>(
is_finite,
[](NativeT elem_operand) {
return Eigen::numext::isfinite(elem_operand);
},
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[is_finite], std::move(result_or));
return absl::OkStatus();
}
return InvalidArgument(
"expected element type in shape to be floating point, but got: %s",
PrimitiveType_Name(elem_ty));
},
elem_ty);
}
absl::Status HloEvaluator::HandleReal(const HloInstruction* real) {
auto operand = real->operand(0);
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or = ElementWiseUnaryOpImpl<NativeT, NativeT>(
real, [](NativeT elem_operand) { return elem_operand; },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[real], std::move(result_or));
return absl::OkStatus();
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or =
ElementWiseUnaryOpImpl<typename NativeT::value_type, NativeT>(
real,
[](NativeT elem_operand) { return std::real(elem_operand); },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[real], std::move(result_or));
return absl::OkStatus();
}
LOG(FATAL) << "HandleReal: unknown/unhandled primitive type: "
<< PrimitiveType_Name(operand->shape().element_type());
},
operand->shape().element_type());
}
absl::Status HloEvaluator::HandleImag(const HloInstruction* imag) {
auto operand = imag->operand(0);
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or = ElementWiseUnaryOpImpl<NativeT, NativeT>(
imag, [](NativeT elem_operand) { return NativeT(0); },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[imag], std::move(result_or));
return absl::OkStatus();
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or =
ElementWiseUnaryOpImpl<typename NativeT::value_type, NativeT>(
imag,
[](NativeT elem_operand) { return std::imag(elem_operand); },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[imag], std::move(result_or));
return absl::OkStatus();
}
LOG(FATAL) << "HandleImag: unknown/unhandled primitive type: "
<< PrimitiveType_Name(operand->shape().element_type());
},
operand->shape().element_type());
}
absl::Status HloEvaluator::HandleComplex(const HloInstruction* complex) {
const Literal& real = GetEvaluatedLiteralFor(complex->operand(0));
const Literal& imag = GetEvaluatedLiteralFor(complex->operand(1));
TF_RET_CHECK(ShapeUtil::Compatible(real.shape(), imag.shape()));
Literal result(complex->shape());
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
TF_RETURN_IF_ERROR(result.Populate<NativeT>(
[&](absl::Span<const int64_t> multi_index) {
return NativeT(
real.Get<typename NativeT::value_type>(multi_index),
imag.Get<typename NativeT::value_type>(multi_index));
}));
evaluated_[complex] = std::move(result);
return absl::OkStatus();
}
LOG(FATAL) << "HandleComplex: unknown/unhandled primitive type: "
<< PrimitiveType_Name(complex->shape().element_type());
},
complex->shape().element_type());
}
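// Comparisons dispatch on the shared operand element type and produce a PRED
// literal with the same dimensions as the inputs.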
absl::Status HloEvaluator::HandleCompare(const HloInstruction* compare) {
ComparisonDirection direction = compare->comparison_direction();
ComparisonOrder order = compare->comparison_order();
auto lhs = compare->operand(0);
auto rhs = compare->operand(1);
DCHECK(ShapeUtil::SameDimensions(compare->shape(), rhs->shape()) &&
ShapeUtil::SameDimensions(lhs->shape(), rhs->shape()));
TF_RET_CHECK(lhs->shape().element_type() == rhs->shape().element_type());
auto element_type = lhs->shape().element_type();
Comparison comparison(direction, element_type, order);
const Literal& lhs_literal = GetEvaluatedLiteralFor(lhs);
const Literal& rhs_literal = GetEvaluatedLiteralFor(rhs);
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
TF_ASSIGN_OR_RETURN(evaluated_[compare],
Compare<NativeT>(compare->shape(), comparison,
lhs_literal, rhs_literal));
return absl::OkStatus();
}
LOG(FATAL) << "HandleCompare: unknown primitive type: "
<< PrimitiveType_Name(element_type);
},
element_type);
}
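// When only one tuple element is being evaluated (visitor_shape_index_ is
// non-empty), the other elements are filled with undetermined leaf arrays;
// if a literal for this tuple is already cached, the newly determined
// element is merged into it via CopyFrom.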
absl::Status HloEvaluator::HandleTuple(const HloInstruction* tuple) {
std::vector<const Literal*> operand_literals;
std::vector<Literal> operand_literal_values;
if (!visitor_shape_index_.empty()) {
int64_t tuple_index = visitor_shape_index_.front();
operand_literal_values.resize(tuple->operand_count());
for (int operand_index = 0; operand_index < tuple->operand_count();
++operand_index) {
if (operand_index == tuple_index) {
operand_literals.push_back(
&GetEvaluatedLiteralFor(tuple->operand(operand_index)));
} else {
operand_literal_values[operand_index] =
Literal::CreateFromShapeWithUndeterminedLeafArrays(
ShapeUtil::GetSubshape(tuple->shape(), {operand_index}));
operand_literals.push_back(&operand_literal_values[operand_index]);
}
}
} else {
for (auto operand : tuple->operands()) {
operand_literals.push_back(&GetEvaluatedLiteralFor(operand));
}
}
std::vector<const Shape*> element_shapes;
element_shapes.reserve(operand_literals.size());
for (const auto* element : operand_literals) {
element_shapes.push_back(&element->shape());
}
Literal new_result = Literal::CreateFromShapeWithUndeterminedLeafArrays(
ShapeUtil::MakeTupleShapeWithPtrs(element_shapes));
for (int i = 0, end = operand_literals.size(); i < end; ++i) {
TF_RETURN_IF_ERROR(
new_result.CopyFrom(*operand_literals[i], {i}));
}
if (evaluated_.contains(tuple)) {
CHECK(new_result.IsDetermined(visitor_shape_index_));
TF_RETURN_IF_ERROR(
evaluated_[tuple].CopyFrom(std::move(new_result),
visitor_shape_index_,
visitor_shape_index_));
} else {
evaluated_[tuple] = std::move(new_result);
}
return absl::OkStatus();
}
namespace {
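// Converts between FFT buffer values and literal element types; the float
// specialization keeps only the real part of a complex value, as needed when
// writing IRFFT output.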
template <typename ToType, typename FromType>
struct TypeConverter {
static inline ToType GetAs(FromType value) {
return static_cast<ToType>(value);
}
};
template <typename FromType>
struct TypeConverter<float, FromType> {
static inline float GetAs(FromType value) {
return static_cast<float>(value.real());
}
};
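// Reference FFT engine behind HandleFft. Axes whose length is a power of two
// use an iterative radix-2 Cooley-Tukey pass (Fft1D); other lengths fall
// back to a naive O(n^2) DFT (NaiveDft1D). RFFT/IRFFT exploit conjugate
// symmetry by contracting the output or expanding the input along the
// innermost transformed axis. fft_lengths_ is reversed in the constructor so
// that index 0 is the innermost (fastest-varying) axis.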
template <typename ComplexType>
class FftTransform {
public:
explicit FftTransform(const HloInstruction* fft)
: fft_type_(fft->fft_type()),
fft_rank_(fft->fft_length().size()),
fft_lengths_(fft->fft_length()) {
absl::c_reverse(fft_lengths_);
}
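  // Transforms input_literal into *output_literal: for every combination of
  // the higher (non-FFT) dimensions, copies one fft_size block into a dense
  // scratch vector, sweeps the 1D transforms over it, and writes the result
  // back out.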
absl::Status ComputeFft(const HloInstruction* fft,
const Literal& input_literal,
Literal* output_literal) {
const Shape& input_shape = input_literal.shape();
const Shape& output_shape = fft->shape();
TF_RETURN_IF_ERROR(CheckParameters(input_shape, output_shape));
const auto fft_strides = ComputeStrides(fft_lengths_);
const int64_t fft_size = fft_strides[fft_rank_];
if (fft_size > 0) {
std::vector<ComplexType> data(fft_size);
int64_t buffer_size = 0;
for (auto len : fft_lengths_) {
int64_t size =
absl::has_single_bit(static_cast<uint64_t>(len)) ? len * 2 : len;
buffer_size = std::max(buffer_size, size);
}
std::vector<ComplexType> buffer(buffer_size);
const auto input_lengths = GetDimensionLengths(input_literal);
const auto output_lengths = GetDimensionLengths(*output_literal);
const auto input_strides = ComputeStrides(input_lengths, input_literal);
const auto output_strides =
ComputeStrides(output_lengths, *output_literal);
auto base_case = [&](int64_t axis, int64_t output_index,
int64_t input_index, bool within_src_bounds) {
if (axis == fft_rank_ - 1) {
CHECK(within_src_bounds);
bool input_is_zero = CopyDataFromInput(
input_literal, input_index, fft_size, fft_lengths_, fft_strides,
input_lengths, input_strides, absl::MakeSpan(data));
if (!input_is_zero) {
Sweep(fft_lengths_, fft_strides, absl::MakeSpan(data),
absl::MakeSpan(buffer));
}
CopyDataToOutput(absl::MakeSpan(data), output_index, fft_lengths_,
fft_strides, output_lengths, output_strides,
output_literal);
return true;
}
return false;
};
GenerateIndices(output_lengths, output_strides, input_lengths,
input_strides, input_shape.rank(), 0, 0, base_case);
}
return absl::OkStatus();
}
private:
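  // Gathers a strided 1D slice of `data` into the contiguous `buffer`,
  // optionally expanding a conjugate-symmetric half-spectrum to full length;
  // returns true iff every gathered value was zero.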
static bool GatherToBuffer(absl::Span<ComplexType> data, int64_t length,
int64_t start, int64_t stride, bool expand_input,
absl::Span<ComplexType> buffer) {
CHECK_GE(buffer.size(), length);
bool input_is_zero = true;
const int64_t ub = expand_input ? length / 2 + 1 : length;
CHECK_GE(data.size(), start + (ub - 1) * stride);
for (int64_t k = 0; k < ub; k++) {
ComplexType value = data[start + k * stride];
input_is_zero &= value == ComplexType(0.0, 0.0);
buffer[k] = value;
if (expand_input) {
if (k > 0 && k < (length - ub + 1)) {
buffer[length - k] = std::conj(value);
}
}
}
return input_is_zero;
}
static inline ComplexType Twiddle(int64_t k, int64_t length, bool inverse) {
auto coeff = std::exp(ComplexType(0.0, -2.0 * M_PI * k / length));
return inverse ? std::conj(coeff) : coeff;
}
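  // Naive O(length^2) DFT over one strided 1D slice of `data`. With
  // expand_input set, only the first length/2+1 input entries are read and
  // the rest are reconstructed by conjugate symmetry; with contract_output
  // set, only the first length/2+1 outputs are written back.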
static void NaiveDft1D(int64_t length, int64_t start, int64_t stride,
bool inverse, bool contract_output, bool expand_input,
absl::Span<ComplexType> data,
absl::Span<ComplexType> buffer) {
const bool input_is_zero =
GatherToBuffer(data, length, start, stride, expand_input, buffer);
if (!input_is_zero) {
const int64_t ub = contract_output ? length / 2 + 1 : length;
for (int64_t k = 0; k < ub; k++) {
ComplexType value = ComplexType(0.0, 0.0);
for (int n = 0; n < length; n++) {
value += buffer[n] * Twiddle(n * k, length, inverse);
}
data[start + k * stride] =
inverse ? value / ComplexType(length, 0.0) : value;
}
}
}
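  // Iterative radix-2 Cooley-Tukey FFT, O(length * log(length)); requires a
  // power-of-two length. `buffer` doubles as ping-pong scratch space, with
  // in_base/out_base swapped each pass.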
static void Fft1D(int64_t length, int64_t start, int64_t stride, bool inverse,
bool contract_output, bool expand_input,
absl::Span<ComplexType> data,
absl::Span<ComplexType> buffer) {
CHECK(absl::has_single_bit(static_cast<uint64_t>(length)));
const bool input_is_zero =
GatherToBuffer(data, length, start, stride, expand_input, buffer);
if (!input_is_zero) {
auto generate_twiddles = [](int64_t length, bool inverse) {
std::vector<ComplexType> twiddles;
twiddles.reserve(length / 2);
for (int64_t k = 0; k < length / 2; k++) {
twiddles.push_back(Twiddle(k, length, inverse));
}
return twiddles;
};
int64_t in_base = length;
int64_t out_base = 0;
for (int64_t num_blocks = 1; num_blocks < length; num_blocks *= 2) {
std::swap(in_base, out_base);
auto twiddles = generate_twiddles(num_blocks * 2, inverse);
const int64_t block_size = length / num_blocks;
const int64_t next_iteration_block_size = block_size / 2;
for (int64_t block = 0; block < num_blocks; block++) {
const int64_t in_offset = in_base + block * block_size;
const int64_t out_offset =
out_base + block * next_iteration_block_size;
for (int64_t pair = 0; pair < block_size / 2; pair++) {
const ComplexType even = buffer[in_offset + pair];
const ComplexType odd = buffer[in_offset + block_size / 2 + pair];
const ComplexType twiddled_odd = twiddles[block] * odd;
buffer[out_offset + pair] = even + twiddled_odd;
buffer[out_offset + length / 2 + pair] = even - twiddled_odd;
}
}
}
const int64_t ub = contract_output ? length / 2 + 1 : length;
for (int64_t k = 0; k < ub; k++) {
ComplexType value = buffer[out_base + k];
data[start + k * stride] =
inverse ? value / ComplexType(length, 0.0) : value;
}
}
}
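  // Dispatches to Fft1D for power-of-two lengths, NaiveDft1D otherwise.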
static void Dft1D(int64_t length, int64_t start, int64_t stride, bool inverse,
bool contract_output, bool expand_input,
absl::Span<ComplexType> data,
absl::Span<ComplexType> buffer) {
if (absl::has_single_bit(static_cast<uint64_t>(length))) {
Fft1D(length, start, stride, inverse, contract_output, expand_input, data,
buffer);
} else {
NaiveDft1D(length, start, stride, inverse, contract_output, expand_input,
data, buffer);
}
}
static std::vector<int64_t> GetDimensionLengths(const Literal& literal) {
auto dimensions = literal.shape().dimensions();
return std::vector<int64_t>(dimensions.rbegin(), dimensions.rend());
}
static std::vector<int64_t> ComputeStrides(
const absl::Span<const int64_t> lengths, const Layout& layout) {
const int64_t num_dimensions = lengths.size();
CHECK_EQ(num_dimensions, layout.minor_to_major_size());
std::vector<int64_t> strides(num_dimensions + 1);
int64_t stride = 1;
for (int64_t i = 0; i < num_dimensions; i++) {
const int64_t index = (num_dimensions - 1) - layout.minor_to_major(i);
strides[index] = stride;
stride *= lengths[index];
}
strides[num_dimensions] = stride;
return strides;
}
static std::vector<int64_t> ComputeStrides(
const absl::Span<const int64_t> lengths) {
return ComputeStrides(lengths,
LayoutUtil::GetDefaultLayoutForRank(lengths.size()));
}
static std::vector<int64_t> ComputeStrides(
const absl::Span<const int64_t> lengths, const Literal& literal) {
return literal.shape().has_layout()
? ComputeStrides(lengths, literal.shape().layout())
: ComputeStrides(lengths);
}
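  // Runs a 1D transform along each FFT axis in turn, recursing over every
  // index combination of the remaining axes. For IRFFT the axes are swept in
  // reverse so that the truncated axis 0 is expanded and transformed last,
  // over fully populated intermediate data.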
void Sweep(const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
absl::Span<ComplexType> data, absl::Span<ComplexType> buffer) {
const bool inverse =
fft_type_ == FftType::IFFT || fft_type_ == FftType::IRFFT;
const bool input_is_truncated = fft_type_ == FftType::IRFFT;
const bool output_is_truncated = fft_type_ == FftType::RFFT;
std::function<void(int64_t, int64_t, int64_t)> sweep =
[&](int64_t sweep_axis, int64_t axis, int64_t start) {
if (axis < 0) {
const int64_t length = fft_lengths[sweep_axis];
const int64_t stride = fft_strides[sweep_axis];
const bool expand_input = input_is_truncated && sweep_axis == 0;
            const bool contract_output =
                output_is_truncated && sweep_axis == 0;
            Dft1D(length, start, stride, inverse, contract_output,
                  expand_input, data, buffer);
} else if (axis == sweep_axis) {
sweep(sweep_axis, axis - 1, start);
} else {
const int64_t length = fft_lengths[axis];
const bool is_truncated = input_is_truncated || output_is_truncated;
const int64_t ub =
is_truncated && axis == 0 ? (length / 2) + 1 : length;
for (int64_t i = 0; i < ub; i++) {
sweep(sweep_axis, axis - 1, start + i * fft_strides[axis]);
}
}
};
if (input_is_truncated) {
for (int64_t sweep_axis = fft_rank_ - 1; sweep_axis >= 0; sweep_axis--) {
sweep(sweep_axis, fft_rank_ - 1, 0);
}
} else {
for (int64_t sweep_axis = 0; sweep_axis < fft_rank_; sweep_axis++) {
sweep(sweep_axis, fft_rank_ - 1, 0);
}
}
}
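  // Enumerates all (dst, src) linear-index pairs over the destination
  // bounds, tracking whether the source index is still within its bounds,
  // and invokes `base` at each level; `base` returning true terminates the
  // recursion for that axis.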
template <typename BaseFn>
static void GenerateIndices(const absl::Span<const int64_t> dst_lengths,
const absl::Span<const int64_t> dst_strides,
const absl::Span<const int64_t> src_lengths,
const absl::Span<const int64_t> src_strides,
int64_t rank, int64_t dst_start,
int64_t src_start, BaseFn&& base) {
CHECK_EQ(dst_lengths.size() + 1, dst_strides.size());
CHECK_GE(dst_lengths.size(), rank);
CHECK_EQ(src_lengths.size() + 1, src_strides.size());
CHECK_GE(src_lengths.size(), rank);
std::function<void(int64_t, int64_t, int64_t, bool)> generate =
[&](int64_t axis, int64_t dst_index, int64_t src_index,
bool within_src_bounds) {
if (!base(axis, dst_index, src_index, within_src_bounds)) {
for (int64_t i = 0; i < dst_lengths[axis]; i++) {
within_src_bounds &= i < src_lengths[axis];
generate(axis - 1, dst_index, src_index, within_src_bounds);
dst_index += dst_strides[axis];
src_index += src_strides[axis];
}
}
};
generate(rank - 1, dst_start, src_start, true);
}
template <typename InputType>
bool CopyDataFromInput(const Literal& input_literal, int64_t input_start,
int64_t fft_size,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> input_lengths,
const absl::Span<const int64_t> input_strides,
absl::Span<ComplexType> data) {
CHECK_GE(data.size(), fft_size);
const bool input_is_truncated = fft_type_ == FftType::IRFFT;
bool input_is_zero = true;
const InputType* input_data = input_literal.data<InputType>().data();
auto base_case = [&](int64_t axis, int64_t dst_index, int64_t src_index,
bool within_src_bounds) {
if (axis == 0) {
const int64_t length = fft_lengths[axis];
const int64_t ub = input_is_truncated ? (length / 2) + 1 : length;
for (int64_t i = 0; i < ub; i++) {
ComplexType value = ComplexType(0);
if (within_src_bounds && i < input_lengths[axis]) {
value = TypeConverter<ComplexType, InputType>::GetAs(
input_data[src_index + i * input_strides[axis]]);
input_is_zero &= value == ComplexType(0.0, 0.0);
}
data[dst_index + i * fft_strides[axis]] = value;
}
return true;
}
return false;
};
GenerateIndices(fft_lengths, fft_strides, input_lengths, input_strides,
fft_rank_, 0, input_start, base_case);
return input_is_zero;
}
template <typename OutputType>
void CopyDataToOutput(const absl::Span<ComplexType> data,
int64_t output_start,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> output_lengths,
const absl::Span<const int64_t> output_strides,
Literal* output_literal) {
const bool output_is_truncated = fft_type_ == FftType::RFFT;
OutputType* output_data = output_literal->data<OutputType>().data();
auto base_case = [&](int64_t axis, int64_t dst_index, int64_t src_index,
bool within_src_bounds) {
if (axis == 0) {
const int64_t length = fft_lengths[axis];
const int64_t ub = output_is_truncated ? (length / 2) + 1 : length;
for (int64_t i = 0; i < output_lengths[axis]; i++) {
OutputType value = OutputType(0);
if (within_src_bounds && i < ub) {
value = TypeConverter<OutputType, ComplexType>::GetAs(
data[src_index + i * fft_strides[axis]]);
}
output_data[dst_index + i * output_strides[axis]] = value;
}
return true;
}
return false;
};
GenerateIndices(output_lengths, output_strides, fft_lengths, fft_strides,
fft_rank_, output_start, 0, base_case);
}
bool CopyDataFromInput(const Literal& input_literal, int64_t input_start,
int64_t fft_size,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> input_lengths,
const absl::Span<const int64_t> input_strides,
absl::Span<ComplexType> data) {
const bool input_is_float = fft_type_ == FftType::RFFT;
if (input_is_float) {
return CopyDataFromInput<float>(input_literal, input_start, fft_size,
fft_lengths, fft_strides, input_lengths,
input_strides, data);
} else {
return CopyDataFromInput<complex64>(input_literal, input_start, fft_size,
fft_lengths, fft_strides,
input_lengths, input_strides, data);
}
}
void CopyDataToOutput(const absl::Span<ComplexType> data,
int64_t output_start,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> output_lengths,
const absl::Span<const int64_t> output_strides,
Literal* output_literal) {
const bool output_is_float = fft_type_ == FftType::IRFFT;
if (output_is_float) {
CopyDataToOutput<float>(data, output_start, fft_lengths, fft_strides,
output_lengths, output_strides, output_literal);
} else {
CopyDataToOutput<complex64>(data, output_start, fft_lengths, fft_strides,
output_lengths, output_strides,
output_literal);
}
}
absl::Status CheckParameters(const Shape& input_shape,
const Shape& output_shape) {
if (fft_rank_ <= 0) {
return InvalidArgument("Zero or negative FFT rank.");
}
if (*absl::c_min_element(fft_lengths_) < 0) {
return InvalidArgument("Negative FFT length.");
}
TF_CHECK_OK(ShapeUtil::ValidateShape(input_shape));
if (!input_shape.IsArray()) {
return Unimplemented("Only array input shapes are supported.");
}
auto input_elt_type = input_shape.element_type();
if (fft_type_ == FftType::RFFT && input_elt_type != PrimitiveType::F32) {
return InvalidArgument("Invalid input type: %d, must be %d (float).",
input_elt_type, PrimitiveType::F32);
}
if (fft_type_ != FftType::RFFT && input_elt_type != PrimitiveType::C64) {
return InvalidArgument("Invalid input type: %d, must be %d (complex64).",
input_elt_type, PrimitiveType::C64);
}
const int64_t input_rank = input_shape.rank();
if (input_rank < fft_rank_) {
return InvalidArgument("Input shape rank is smaller than FFT rank.");
}
TF_CHECK_OK(ShapeUtil::ValidateShape(output_shape));
if (!output_shape.IsArray()) {
return Unimplemented("Only array output shapes are supported.");
}
auto output_elt_type = output_shape.element_type();
if (fft_type_ == FftType::IRFFT && output_elt_type != PrimitiveType::F32) {
return InvalidArgument("Invalid output type: %d, must be %d (float).",
output_elt_type, PrimitiveType::F32);
}
if (fft_type_ != FftType::IRFFT && output_elt_type != PrimitiveType::C64) {
return InvalidArgument("Invalid output type: %d, must be %d (complex64).",
output_elt_type, PrimitiveType::C64);
}
const int64_t output_rank = output_shape.rank();
if (output_rank < fft_rank_) {
return InvalidArgument("Output shape rank is smaller than FFT rank.");
}
if (input_rank != output_rank) {
return InvalidArgument(
"Ranks of input shape and output shape do not match.");
}
for (int64_t dim = 0; dim < input_rank - fft_rank_; dim++) {
if (ShapeUtil::GetDimension(input_shape, dim) !=
ShapeUtil::GetDimension(output_shape, dim)) {
return InvalidArgument(
"Higher dimension lengths of input shape and output shape do not "
"match.");
}
}
return absl::OkStatus();
}
private:
const FftType fft_type_;
const int64_t fft_rank_;
std::vector<int64_t> fft_lengths_;
};
}
absl::Status HloEvaluator::HandleFft(const HloInstruction* fft) {
const Literal& input_literal = GetEvaluatedLiteralFor(fft->operand(0));
Literal output_literal = Literal::CreateFromShape(fft->shape());
FftTransform<complex128> transform(fft);
TF_RETURN_IF_ERROR(transform.ComputeFft(fft, input_literal, &output_literal));
evaluated_[fft] = std::move(output_literal);
return absl::OkStatus();
}
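// Gather visits the output as a cross product of two iteration spaces: the
// batch space walks the non-offset output dimensions with offset dimensions
// pinned to size 1, and the offset space below does the opposite, sized by
// the gather slice sizes.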
ShapeUtil::IndexIterationSpace IterationSpaceForOutputBatchIndices(
const Shape& output_shape, const GatherDimensionNumbers& dim_numbers) {
int64_t output_rank = output_shape.dimensions_size();
std::vector<int64_t> index_base(output_rank, 0);
std::vector<int64_t> index_count;
index_count.reserve(output_rank);
for (int64_t i = 0; i < output_rank; i++) {
bool is_output_batch_dim =
!absl::c_binary_search(dim_numbers.offset_dims(), i);
index_count.push_back(is_output_batch_dim ? output_shape.dimensions(i) : 1);
}
return {std::move(index_base), std::move(index_count),
std::vector<int64_t>(output_rank, 1)};
}
ShapeUtil::IndexIterationSpace IterationSpaceForOutputOffsetIndices(
int64_t output_rank, absl::Span<const int64_t> slice_sizes,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64_t> index_base(output_rank, 0);
std::vector<int64_t> index_count(output_rank, 1);
int64_t slice_sizes_idx = 0;
for (int64_t i = 0; i < output_rank; i++) {
bool is_output_window_dim =
absl::c_binary_search(dim_numbers.offset_dims(), i);
if (is_output_window_dim) {
while (absl::c_binary_search(dim_numbers.collapsed_slice_dims(),
slice_sizes_idx)) {
slice_sizes_idx++;
}
index_count[i] = slice_sizes[slice_sizes_idx++];
}
}
return {std::move(index_base), std::move(index_count),
std::vector<int64_t>(output_rank, 1)};
}
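// Maps an output batch index to an input index: the batch components of the
// output index select one index vector out of start_indices, which is then
// scattered into input dimensions via start_index_map. The result is a Span
// over a reused member buffer, so each call invalidates the previous one.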
class OutputBatchIndexToInputIndex {
public:
explicit OutputBatchIndexToInputIndex(
const GatherDimensionNumbers* dim_numbers, const Shape& input_shape,
const Shape& output_shape, const Literal* start_indices)
: dim_numbers_(*dim_numbers), start_indices_(*start_indices) {
for (int64_t i = 0; i < output_shape.dimensions_size(); i++) {
output_dim_is_batch_dims_.push_back(
!absl::c_binary_search(dim_numbers_.offset_dims(), i));
}
for (int64_t i = 0; i < input_shape.dimensions_size(); i++) {
int64_t index_of_input_dim_in_index_vector =
std::distance(dim_numbers_.start_index_map().begin(),
absl::c_find(dim_numbers_.start_index_map(), i));
if (index_of_input_dim_in_index_vector ==
dim_numbers_.start_index_map_size()) {
input_dim_value_to_index_vector_.push_back(-1);
} else {
input_dim_value_to_index_vector_.push_back(
index_of_input_dim_in_index_vector);
}
}
index_vector_index_.resize(start_indices_.shape().dimensions_size());
input_index_.resize(input_shape.dimensions_size());
int64_t index_vector_size =
start_indices_.shape().dimensions(dim_numbers_.index_vector_dim());
index_vector_.resize(index_vector_size);
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> output_index) {
PropagateOutputIndexGatherDimsToIndexVectorIndex(output_index);
TF_RETURN_IF_ERROR(FetchIndexVector());
PropagateIndexVectorToInputIndex();
return absl::Span<const int64_t>(input_index_);
}
private:
void PropagateOutputIndexGatherDimsToIndexVectorIndex(
absl::Span<const int64_t> output_index) {
int64_t index_vector_index_i = 0;
for (int64_t i = 0, e = output_index.size(); i < e; i++) {
if (!output_dim_is_batch_dims_[i]) {
continue;
}
if (index_vector_index_i == dim_numbers_.index_vector_dim()) {
index_vector_index_i++;
}
index_vector_index_[index_vector_index_i++] = output_index[i];
}
}
absl::Status FetchIndexVector() {
int64_t index_vector_dim = dim_numbers_.index_vector_dim();
for (int64_t i = 0, e = index_vector_.size(); i < e; i++) {
index_vector_index_[index_vector_dim] = i;
auto start_index = start_indices_.GetIntegralAsS64(index_vector_index_);
TF_RET_CHECK(start_index.has_value());
index_vector_[i] = *start_index;
}
return absl::OkStatus();
}
void PropagateIndexVectorToInputIndex() {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_index_vector_[i] != -1) {
input_index_[i] = index_vector_[input_dim_value_to_index_vector_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_index_vector_;
std::vector<bool> output_dim_is_batch_dims_;
std::vector<int64_t> index_vector_index_;
std::vector<int64_t> index_vector_;
std::vector<int64_t> input_index_;
const GatherDimensionNumbers& dim_numbers_;
const Literal& start_indices_;
};
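// Maps an output offset index to the within-slice input index; collapsed
// slice dimensions map to -1 and contribute nothing.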
class OutputOffsetIndexToInputIndex {
public:
explicit OutputOffsetIndexToInputIndex(
const GatherDimensionNumbers& dim_numbers, const Shape& input_shape,
const Shape& output_shape) {
std::vector<int64_t> window_index_to_output_index;
int64_t output_index_count = 0;
for (int64_t i = 0; i < output_shape.dimensions_size(); i++) {
if (absl::c_binary_search(dim_numbers.offset_dims(), i)) {
window_index_to_output_index.push_back(output_index_count++);
} else {
output_index_count++;
}
}
int64_t window_dim_count = 0;
for (int64_t i = 0; i < input_shape.dimensions_size(); i++) {
if (absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
input_dim_value_to_output_index_.push_back(-1);
} else {
input_dim_value_to_output_index_.push_back(
window_index_to_output_index[window_dim_count++]);
}
}
input_index_.resize(input_shape.dimensions_size());
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> output_index) {
PropagateOutputIndexWindowDimsToInputIndex(output_index);
return absl::Span<const int64_t>(input_index_);
}
int64_t input_dim_value_to_output_index(int64_t input_dim) {
return input_dim_value_to_output_index_[input_dim];
}
private:
void PropagateOutputIndexWindowDimsToInputIndex(
absl::Span<const int64_t> output_index) {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_output_index_[i] != -1) {
input_index_[i] = output_index[input_dim_value_to_output_index_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_output_index_;
std::vector<int64_t> input_index_;
};
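// When start_indices has rank equal to index_vector_dim, the index vector
// dimension is implicit; append an explicit trailing dimension of size 1 so
// the iteration code can treat both layouts uniformly.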
static absl::StatusOr<std::reference_wrapper<const Literal>>
ReshapedGatherIndices(int64_t index_vector_dim, const Literal& start_indices,
Literal* reshaped_start_indices) {
if (start_indices.shape().dimensions_size() != index_vector_dim) {
return std::cref(start_indices);
}
std::vector<int64_t> new_shape(start_indices.shape().dimensions().begin(),
start_indices.shape().dimensions().end());
new_shape.push_back(1);
if (start_indices.shape().is_dynamic()) {
TF_ASSIGN_OR_RETURN(*reshaped_start_indices,
start_indices.ToStatic().Reshape(new_shape));
} else {
TF_ASSIGN_OR_RETURN(*reshaped_start_indices,
start_indices.Reshape(new_shape));
}
return std::cref(*reshaped_start_indices);
}
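// Gather: the outer loop walks output batch indices, resolving each to a
// start index that is clamped so the whole slice stays in bounds; the inner
// loop then copies one element per output offset index.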
absl::Status HloEvaluator::HandleGather(const HloInstruction* gather) {
Literal result = Literal::CreateFromShape(gather->shape());
const Shape& shape = gather->shape();
const GatherDimensionNumbers& dim_numbers =
gather->gather_dimension_numbers();
const Literal& operand = GetEvaluatedLiteralFor(gather->operand(0));
Literal reshaped_start_indices;
TF_ASSIGN_OR_RETURN(
const Literal& start_indices,
ReshapedGatherIndices(dim_numbers.index_vector_dim(),
GetEvaluatedLiteralFor(gather->operand(1)),
&reshaped_start_indices));
ShapeUtil::IndexIterationSpace start_indices_iteration_space =
IterationSpaceForOutputBatchIndices(shape, dim_numbers);
ShapeUtil::IndexIterationSpace offset_indices_iteration_space =
IterationSpaceForOutputOffsetIndices(
shape.dimensions_size(), gather->gather_slice_sizes(), dim_numbers);
std::vector<int64_t> input_index(operand.shape().dimensions_size());
std::vector<int64_t> output_index(gather->shape().dimensions_size());
std::vector<int64_t> input_index_clamped(operand.shape().dimensions_size());
OutputBatchIndexToInputIndex output_batch_index_to_input_index(
&gather->gather_dimension_numbers(), operand.shape(),
shape, &start_indices);
OutputOffsetIndexToInputIndex output_offset_index_to_input_index(
gather->gather_dimension_numbers(), operand.shape(),
shape);
const Shape& operand_shape = operand.shape();
if (ShapeUtil::IsZeroElementArray(operand_shape)) {
evaluated_[gather] = std::move(result);
return absl::OkStatus();
}
auto gather_inner_loop_body =
[&](absl::Span<const int64_t> output_window_index,
absl::Span<const int64_t> input_gather_index,
absl::Span<const int64_t> output_gather_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
absl::Span<const int64_t> input_window_index,
output_offset_index_to_input_index(output_window_index));
for (int i = 0, e = output_index.size(); i < e; i++) {
output_index[i] = output_gather_index[i] + output_window_index[i];
DCHECK_LT(output_index[i], shape.dimensions(i));
}
for (int i = 0, e = input_gather_index.size(); i < e; i++) {
int64_t output_dim =
output_offset_index_to_input_index.input_dim_value_to_output_index(i);
int64_t output_dim_size =
output_dim == -1 ? 1 : shape.dimensions(output_dim);
input_index_clamped[i] =
std::min(operand_shape.dimensions(i) - output_dim_size,
std::max(int64_t{0}, input_gather_index[i]));
}
for (int i = 0, e = input_index.size(); i < e; i++) {
input_index[i] = input_index_clamped[i] + input_window_index[i];
DCHECK_GE(input_index[i], 0);
DCHECK_LT(input_index[i], operand_shape.dimensions(i));
}
result.CopyElementFrom(operand, input_index, output_index);
return true;
};
auto gather_outer_loop_body =
[&](absl::Span<const int64_t> output_gather_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(absl::Span<const int64_t> input_gather_index,
output_batch_index_to_input_index(output_gather_index));
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
shape, offset_indices_iteration_space,
std::bind(gather_inner_loop_body, std::placeholders::_1,
input_gather_index, output_gather_index)));
return true;
};
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
shape, start_indices_iteration_space, gather_outer_loop_body));
evaluated_[gather] = std::move(result);
return absl::OkStatus();
}
namespace {
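// Scatter analogue of ReshapedGatherIndices above.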
absl::StatusOr<std::reference_wrapper<const Literal>> ReshapedScatterIndices(
int64_t index_vector_dim, const Literal& indices,
Literal* reshaped_indices) {
if (indices.shape().dimensions_size() != index_vector_dim) {
return std::cref(indices);
}
std::vector<int64_t> new_shape(indices.shape().dimensions().begin(),
indices.shape().dimensions().end());
new_shape.push_back(1);
if (indices.shape().is_dynamic()) {
TF_ASSIGN_OR_RETURN(*reshaped_indices,
indices.ToStatic().Reshape(new_shape));
} else {
TF_ASSIGN_OR_RETURN(*reshaped_indices, indices.Reshape(new_shape));
}
return std::cref(*reshaped_indices);
}
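// Iteration spaces over the updates shape: the scatter-index space pins the
// update window dimensions to size 1, and the window space pins the scatter
// dimensions instead.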
template <bool kForUpdateWindowIndices>
ShapeUtil::IndexIterationSpace GetIterationSpaceImpl(
absl::Span<const int64_t> updates_dims,
const ScatterDimensionNumbers& dim_numbers) {
int64_t updates_rank = updates_dims.size();
std::vector<int64_t> index_base(updates_rank, 0);
std::vector<int64_t> index_count(updates_rank, 1);
for (int64_t i = 0; i < updates_rank; i++) {
if (kForUpdateWindowIndices) {
bool is_update_window_dim =
absl::c_binary_search(dim_numbers.update_window_dims(), i);
if (is_update_window_dim) {
index_count[i] = updates_dims[i];
}
} else {
bool is_update_scatter_dim =
!absl::c_binary_search(dim_numbers.update_window_dims(), i);
if (is_update_scatter_dim) {
index_count[i] = updates_dims[i];
}
}
}
return {std::move(index_base), std::move(index_count),
std::vector<int64_t>(updates_rank, 1)};
}
ShapeUtil::IndexIterationSpace IterationSpaceForUpdateScatterIndices(
absl::Span<const int64_t> updates_dims,
const ScatterDimensionNumbers& dim_numbers) {
return GetIterationSpaceImpl<false>(updates_dims,
dim_numbers);
}
ShapeUtil::IndexIterationSpace IterationSpaceForUpdateWindowIndices(
absl::Span<const int64_t> updates_dims,
const ScatterDimensionNumbers& dim_numbers) {
return GetIterationSpaceImpl<true>(updates_dims,
dim_numbers);
}
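// Scatter analogue of gather's OutputBatchIndexToInputIndex: maps the
// scatter components of an update index through scatter_indices and
// scatter_dims_to_operand_dims to an operand index.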
class UpdateScatterIndexToInputIndex {
public:
explicit UpdateScatterIndexToInputIndex(
const ScatterDimensionNumbers& dim_numbers, int64_t input_rank,
int64_t updates_rank, const Literal* scatter_indices)
: dim_numbers_(dim_numbers), scatter_indices_(*scatter_indices) {
for (int64_t i = 0; i < updates_rank; i++) {
update_dim_is_scatter_dims_.push_back(
!absl::c_binary_search(dim_numbers_.update_window_dims(), i));
}
for (int64_t i = 0; i < input_rank; i++) {
int64_t index_of_input_dim_in_index_vector =
FindIndex(dim_numbers_.scatter_dims_to_operand_dims(), i);
if (index_of_input_dim_in_index_vector ==
dim_numbers_.scatter_dims_to_operand_dims_size()) {
input_dim_value_to_index_vector_.push_back(-1);
} else {
input_dim_value_to_index_vector_.push_back(
index_of_input_dim_in_index_vector);
}
}
index_vector_index_.resize(scatter_indices_.shape().dimensions_size());
input_index_.resize(input_rank);
int64_t index_vector_size =
scatter_indices_.shape().dimensions(dim_numbers_.index_vector_dim());
index_vector_.resize(index_vector_size);
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> update_index) {
PropagateUpdateIndexScatterDimsToIndexVectorIndex(update_index);
TF_RETURN_IF_ERROR(FetchIndexVector());
PropagateIndexVectorToInputIndex();
return absl::Span<const int64_t>(input_index_);
}
private:
void PropagateUpdateIndexScatterDimsToIndexVectorIndex(
absl::Span<const int64_t> update_index) {
int64_t index_vector_index_i = 0;
for (int64_t i = 0, e = update_index.size(); i < e; i++) {
if (!update_dim_is_scatter_dims_[i]) {
continue;
}
if (index_vector_index_i == dim_numbers_.index_vector_dim()) {
index_vector_index_i++;
}
index_vector_index_[index_vector_index_i++] = update_index[i];
}
}
absl::Status FetchIndexVector() {
int64_t index_vector_dim = dim_numbers_.index_vector_dim();
for (int64_t i = 0, e = index_vector_.size(); i < e; i++) {
index_vector_index_[index_vector_dim] = i;
index_vector_[i] =
*scatter_indices_.GetIntegralAsS64(index_vector_index_);
}
return absl::OkStatus();
}
void PropagateIndexVectorToInputIndex() {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_index_vector_[i] != -1) {
input_index_[i] = index_vector_[input_dim_value_to_index_vector_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_index_vector_;
std::vector<bool> update_dim_is_scatter_dims_;
std::vector<int64_t> index_vector_index_;
std::vector<int64_t> index_vector_;
std::vector<int64_t> input_index_;
const ScatterDimensionNumbers& dim_numbers_;
const Literal& scatter_indices_;
};
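// Scatter analogue of gather's OutputOffsetIndexToInputIndex: maps the
// window components of an update index to a within-slice operand index, with
// inserted window dimensions mapping to -1.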
class UpdateWindowIndexToInputIndex {
public:
explicit UpdateWindowIndexToInputIndex(
const ScatterDimensionNumbers& dim_numbers, int64_t input_rank,
int64_t update_rank) {
std::vector<int64_t> window_index_to_update_index;
int64_t update_index_count = 0;
for (int64_t i = 0; i < update_rank; i++) {
if (absl::c_binary_search(dim_numbers.update_window_dims(), i)) {
window_index_to_update_index.push_back(update_index_count++);
} else {
update_index_count++;
}
}
int64_t window_dim_count = 0;
for (int64_t i = 0; i < input_rank; i++) {
if (absl::c_binary_search(dim_numbers.inserted_window_dims(), i)) {
input_dim_value_to_update_index_.push_back(-1);
} else {
input_dim_value_to_update_index_.push_back(
window_index_to_update_index[window_dim_count++]);
}
}
input_index_.resize(input_rank);
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> update_index) {
PropagateUpdateIndexWindowDimsToInputIndex(update_index);
return absl::Span<const int64_t>(input_index_);
}
int64_t input_dim_value_to_update_index(int64_t input_dim) {
return input_dim_value_to_update_index_[input_dim];
}
private:
void PropagateUpdateIndexWindowDimsToInputIndex(
absl::Span<const int64_t> update_index) {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_update_index_[i] != -1) {
input_index_[i] = update_index[input_dim_value_to_update_index_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_update_index_;
std::vector<int64_t> input_index_;
};
}
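// Scatter mirrors gather's two-level loop, iterating update scatter indices
// on the outside and window indices inside. Out-of-bounds scatter indices
// are skipped rather than clamped, and each visited element is combined with
// its update through the to_apply computation, run in a nested HloEvaluator.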
absl::Status HloEvaluator::HandleScatter(const HloInstruction* hlo) {
auto* scatter = DynCast<HloScatterInstruction>(hlo);
const ScatterDimensionNumbers& dim_numbers =
scatter->scatter_dimension_numbers();
absl::InlinedVector<const Literal*, 1> operands;
operands.reserve(scatter->scatter_operand_count());
for (const HloInstruction* operand_inst : scatter->scatter_operands()) {
operands.push_back(&GetEvaluatedLiteralFor(operand_inst));
}
Literal reshaped_scatter_indices;
TF_ASSIGN_OR_RETURN(
const Literal& scatter_indices,
ReshapedScatterIndices(dim_numbers.index_vector_dim(),
GetEvaluatedLiteralFor(scatter->scatter_indices()),
&reshaped_scatter_indices));
absl::InlinedVector<const Literal*, 1> updates;
updates.reserve(operands.size());
for (const HloInstruction* updates_inst : scatter->scatter_updates()) {
updates.push_back(&GetEvaluatedLiteralFor(updates_inst));
}
auto updates_dims = updates[0]->shape().dimensions();
auto operand_dims = operands[0]->shape().dimensions();
ShapeUtil::IndexIterationSpace scatter_indices_iteration_space =
IterationSpaceForUpdateScatterIndices(updates_dims, dim_numbers);
ShapeUtil::IndexIterationSpace window_indices_iteration_space =
IterationSpaceForUpdateWindowIndices(updates_dims, dim_numbers);
std::vector<int64_t> input_index(operand_dims.size());
std::vector<int64_t> update_index(updates_dims.size());
UpdateScatterIndexToInputIndex update_scatter_index_to_input_index(
scatter->scatter_dimension_numbers(),
operand_dims.size(), updates_dims.size(),
&scatter_indices);
UpdateWindowIndexToInputIndex update_window_index_to_input_index(
scatter->scatter_dimension_numbers(),
operand_dims.size(), updates_dims.size());
Literal result = operands.size() > 1 ? LiteralUtil::MakeTuple(operands)
: operands[0]->Clone();
auto maybe_slice = [](MutableLiteralBase& literal, int idx) {
if (literal.shape().IsTuple()) {
return MutableBorrowingLiteral(&literal, {idx});
}
DCHECK_EQ(idx, 0);
return MutableBorrowingLiteral(&literal);
};
HloEvaluator embedded_evaluator;
auto scatter_inner_loop_body =
[&](absl::Span<const int64_t> update_window_index,
absl::Span<const int64_t> input_scatter_index,
absl::Span<const int64_t> update_scatter_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
absl::Span<const int64_t> input_window_index,
update_window_index_to_input_index(update_window_index));
for (int i = 0, e = update_index.size(); i < e; i++) {
update_index[i] = update_scatter_index[i] + update_window_index[i];
DCHECK_LT(update_index[i], updates_dims[i]);
}
for (int i = 0, e = input_scatter_index.size(); i < e; i++) {
int64_t update_dim =
update_window_index_to_input_index.input_dim_value_to_update_index(i);
int64_t update_dim_size = update_dim == -1 ? 1 : updates_dims[update_dim];
if ((input_scatter_index[i] < 0) ||
(input_scatter_index[i] > operand_dims[i] - update_dim_size)) {
return true;
}
}
for (int i = 0, e = input_index.size(); i < e; i++) {
input_index[i] = input_scatter_index[i] + input_window_index[i];
}
absl::InlinedVector<Literal, 2> to_apply_args;
to_apply_args.reserve(operands.size() + updates.size());
for (int i = 0, n = operands.size(); i < n; ++i) {
to_apply_args.push_back(
LiteralUtil::GetScalarLiteral(maybe_slice(result, i), input_index));
}
for (int i = 0, n = operands.size(); i < n; ++i) {
to_apply_args.push_back(
LiteralUtil::GetScalarLiteral(*updates[i], update_index));
}
Literal updated_result =
embedded_evaluator.Evaluate(*scatter->to_apply(), to_apply_args)
.value();
embedded_evaluator.ResetVisitStates();
for (int i = 0, n = operands.size(); i < n; ++i) {
auto result_slice = maybe_slice(result, i);
LiteralUtil::SetScalarLiteral(result_slice, input_index,
maybe_slice(updated_result, i));
}
return true;
};
auto scatter_outer_loop_body =
[&](absl::Span<const int64_t> update_scatter_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
absl::Span<const int64_t> input_scatter_index,
update_scatter_index_to_input_index(update_scatter_index));
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
updates[0]->shape(), window_indices_iteration_space,
[&](absl::Span<const int64_t> update_window_index) {
return scatter_inner_loop_body(
update_window_index, input_scatter_index, update_scatter_index);
}));
return true;
};
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
updates[0]->shape(), scatter_indices_iteration_space,
scatter_outer_loop_body));
evaluated_[scatter] = std::move(result);
return absl::OkStatus();
}
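// Validates that each operand dimension matches the output dimension it is
// broadcast to, then delegates to Literal::Broadcast.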
absl::Status HloEvaluator::HandleBroadcast(const HloInstruction* broadcast) {
const Literal& operand = GetEvaluatedLiteralFor(broadcast->operand(0));
TF_RET_CHECK(broadcast->shape().element_type() ==
operand.shape().element_type())
<< " broadcast from a different data type is not supported";
TF_RET_CHECK(broadcast->dimensions().size() == operand.shape().rank())
<< "broadcast dimensions is of size: " << broadcast->dimensions().size()
<< " and rank of operand_to_broadcast is: " << operand.shape().rank();
for (int64_t i = 0; i < broadcast->dimensions().size(); ++i) {
auto operand_dim_size = operand.shape().dimensions(i);
auto broadcast_dim_size =
broadcast->shape().dimensions(broadcast->dimensions(i));
TF_RET_CHECK(operand_dim_size == broadcast_dim_size) << absl::StreamFormat(
"Operand dimension %d is broadcast to output dimension %d, but the "
"sizes of these two dims do not match (%d vs %d): %s",
i, broadcast->dimensions(i), operand_dim_size, broadcast_dim_size,
broadcast->ToString());
}
TF_ASSIGN_OR_RETURN(
evaluated_[broadcast],
operand.Broadcast(broadcast->shape(), broadcast->dimensions()));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAfterAll(const HloInstruction* after_all) {
evaluated_[after_all] = LiteralUtil::CreateToken();
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAddDependency(
const HloInstruction* add_dependency) {
evaluated_[add_dependency] =
GetEvaluatedLiteralFor(add_dependency->operand(0)).Clone();
return absl::OkStatus();
}
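// get-tuple-element allocates a literal of the element shape and copies the
// selected tuple element out of the operand literal.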
absl::Status HloEvaluator::HandleGetTupleElement(
const HloInstruction* get_tuple_element) {
const auto result_shape = get_tuple_element->shape();
const int64_t index = get_tuple_element->tuple_index();
auto operand = get_tuple_element->operand(0);
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferGetTupleElementShape(operand->shape(), index));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
const Literal& operand_tuple_literal = GetEvaluatedLiteralFor(operand);
evaluated_[get_tuple_element] =
Literal(ShapeUtil::GetTupleElementShape(operand->shape(), index));
  return evaluated_[get_tuple_element].CopyFrom(operand_tuple_literal,
                                                /*dest_shape_index=*/{},
                                                /*src_shape_index=*/{index});
}
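// copy also implements an element-type conversion when the source and
// destination element types differ.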
absl::Status HloEvaluator::HandleCopy(const HloInstruction* copy) {
if (copy->shape().element_type() !=
copy->operand(0)->shape().element_type()) {
TF_ASSIGN_OR_RETURN(Literal result,
GetEvaluatedLiteralFor(copy->operand(0))
.Convert(copy->shape().element_type()));
TF_RET_CHECK(ShapeUtil::Compatible(copy->shape(), result.shape()));
evaluated_[copy] = std::move(result);
} else {
TF_RET_CHECK(
ShapeUtil::Compatible(copy->shape(), copy->operand(0)->shape()));
evaluated_[copy] = GetEvaluatedLiteralFor(copy->operand(0)).Clone();
}
return absl::OkStatus();
}
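// async-start is evaluated synchronously: the wrapped computation is run to
// completion, then the operands are packed into tuple index {0} and the
// result into tuple index {1} of the async-start shape.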
absl::Status HloEvaluator::HandleAsyncStart(const HloInstruction* async_start) {
std::vector<const Literal*> arg_literals;
arg_literals.reserve(async_start->operands().size());
for (auto operand : async_start->operands()) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(&arg_literal);
}
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(
Literal result,
embedded_evaluator->Evaluate(*async_start->async_wrapped_computation(),
arg_literals));
evaluated_[async_start] = Literal(async_start->shape());
for (int i = 0; i < arg_literals.size(); ++i) {
    TF_RETURN_IF_ERROR(evaluated_[async_start].CopyFrom(
        *arg_literals[i], /*dest_shape_index=*/{0, i},
        /*src_shape_index=*/{}));
}
  TF_RETURN_IF_ERROR(evaluated_[async_start].MoveFrom(
      std::move(result), /*dest_shape_index=*/{1}));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAsyncUpdate(
const HloInstruction* async_update) {
const Literal& operand_tuple_literal =
GetEvaluatedLiteralFor(async_update->operand(0));
evaluated_[async_update] = Literal(async_update->shape());
  TF_RETURN_IF_ERROR(evaluated_[async_update].CopyFrom(
      operand_tuple_literal, /*dest_shape_index=*/{},
      /*src_shape_index=*/{}));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAsyncDone(const HloInstruction* async_done) {
const Literal& operand_tuple_literal =
GetEvaluatedLiteralFor(async_done->operand(0));
evaluated_[async_done] = Literal(async_done->shape());
  TF_RETURN_IF_ERROR(evaluated_[async_done].CopyFrom(
      operand_tuple_literal, /*dest_shape_index=*/{},
      /*src_shape_index=*/{1}));
return absl::OkStatus();
}
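// copy-start is only evaluable when its sole user is the matching copy-done;
// the result tuple carries two copies of the operand plus a dummy U32
// context, and copy-done later unwraps tuple index {0}.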
absl::Status HloEvaluator::HandleCopyStart(const HloInstruction* copy_start) {
if (copy_start->user_count() != 1 ||
copy_start->users().at(0)->opcode() != HloOpcode::kCopyDone) {
return absl::FailedPreconditionError(
absl::StrCat("Cannot evaluate a kCopyStart that doesn't have a single "
"kCopyDone user. Instruction: ",
copy_start->ToString()));
}
const Literal context_literal = LiteralUtil::CreateR0<uint32_t>(0);
evaluated_[copy_start] = LiteralUtil::MakeTuple(
{&GetEvaluatedLiteralFor(copy_start->operand(0)),
&GetEvaluatedLiteralFor(copy_start->operand(0)), &context_literal});
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleCopyDone(const HloInstruction* copy_done) {
const HloInstruction* operand = copy_done->operand(0);
if (operand->opcode() != HloOpcode::kCopyStart) {
return absl::FailedPreconditionError(
absl::StrCat("Cannot evaluate a kCopyDone that doesn't have a "
"kCopyStart as operand. Instruction: ",
copy_done->ToString()));
}
const Literal& operand_tuple_literal = GetEvaluatedLiteralFor(operand);
evaluated_[copy_done] =
Literal(ShapeUtil::GetTupleElementShape(operand->shape(), 0));
  TF_RETURN_IF_ERROR(evaluated_[copy_done].CopyFrom(
      operand_tuple_literal, /*dest_shape_index=*/{},
      /*src_shape_index=*/{0}));
return absl::OkStatus();
}
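// Evaluates the called computation on the operands' evaluated literals using
// a fresh embedded evaluator.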
absl::Status HloEvaluator::HandleCall(const HloInstruction* call) {
auto* computation = call->to_apply();
auto operands = call->operands();
std::vector<const Literal*> arg_literals;
arg_literals.reserve(operands.size());
for (auto operand : operands) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(&arg_literal);
}
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(Literal result,
embedded_evaluator->Evaluate(*computation, arg_literals));
evaluated_[call] = std::move(result);
return absl::OkStatus();
}
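// Evaluates a fusion by cloning the fused computation into a scratch module,
// assigning default layouts to any instruction shape that lacks one, and then
// evaluating the clone like an ordinary computation.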
absl::Status HloEvaluator::HandleFusion(const HloInstruction* fusion) {
HloModuleConfig config;
HloModule empty_hlo_module("EmptyModuleForFusion", config,
std::make_unique<CompilationEnvironments>(
fusion->GetModule()->comp_envs()));
HloCloneContext context(&empty_hlo_module);
auto cloned_fused_computation =
fusion->fused_instructions_computation()->Clone(
"clone_with_layout", &context);
for (auto* instruction : cloned_fused_computation->instructions()) {
if (!LayoutUtil::HasLayout(instruction->shape())) {
LayoutUtil::SetToDefaultLayout(instruction->mutable_shape());
}
}
auto readded_computation =
empty_hlo_module.AddEntryComputation(std::move(cloned_fused_computation));
auto operands = fusion->operands();
std::vector<const Literal*> arg_literals;
arg_literals.reserve(operands.size());
for (auto operand : operands) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(&arg_literal);
}
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(Literal result, embedded_evaluator->Evaluate(
*readded_computation, arg_literals));
evaluated_[fusion] = std::move(result);
return absl::OkStatus();
}
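// Selects the branch from the evaluated predicate (PRED operand) or branch
// index (S32 operand); per conditional semantics, an out-of-range index falls
// back to the last branch.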
absl::Status HloEvaluator::HandleConditional(
const HloInstruction* conditional) {
const auto& branch_index_literal =
GetEvaluatedLiteralFor(conditional->operand(0));
int branch_index;
if (conditional->operand(0)->shape().element_type() == PRED) {
branch_index = branch_index_literal.Get<bool>({}) ? 0 : 1;
} else {
branch_index = branch_index_literal.Get<int32_t>({});
if (branch_index < 0 || branch_index >= conditional->branch_count()) {
branch_index = conditional->branch_count() - 1;
}
}
const auto& branch_computation_arg =
GetEvaluatedLiteralFor(conditional->operand(1 + branch_index));
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(Literal result,
embedded_evaluator->Evaluate(
*conditional->branch_computation(branch_index),
{&branch_computation_arg}));
evaluated_[conditional] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleConvert(const HloInstruction* convert) {
const HloInstruction* operand = convert->operand(0);
TF_RET_CHECK(ShapeUtil::SameDimensions(operand->shape(), convert->shape()));
TF_ASSIGN_OR_RETURN(Literal result, GetEvaluatedLiteralFor(operand).Convert(
convert->shape().element_type()));
evaluated_[convert] = std::move(result);
return absl::OkStatus();
}
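// Per dynamic-slice semantics, each start index is clamped into
// [0, operand_dim - slice_dim] before the slice is copied out element-wise.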
absl::Status HloEvaluator::HandleDynamicSlice(
const HloInstruction* dynamic_slice) {
auto operand = dynamic_slice->operand(0);
auto start_indices = dynamic_slice->operand(1);
auto result_shape = dynamic_slice->shape();
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferDynamicSliceShape(
operand->shape(),
Cast<HloDynamicSliceInstruction>(dynamic_slice)->index_shapes(),
dynamic_slice->dynamic_slice_sizes()));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape is set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
TF_RET_CHECK(
primitive_util::IsIntegralType(start_indices->shape().element_type()));
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
std::vector<int64_t> start =
GetS64Indices(absl::MakeConstSpan(dynamic_slice->operands()).subspan(1));
for (int64_t i = 0; i < start.size(); ++i) {
start[i] = std::min<int64_t>(
std::max(int64_t{0}, start[i]),
operand_literal.shape().dimensions(i) - result_shape.dimensions(i));
}
std::vector<int64_t> operand_index(start.size());
Literal result(result_shape);
const size_t element_byte_size =
primitive_util::ByteWidth(result_shape.element_type());
auto* operand_base = static_cast<const char*>(operand_literal.untyped_data());
auto func = [&](void* dest, absl::Span<const int64_t> result_index) {
for (int64_t i = 0; i < operand_index.size(); ++i) {
CHECK_GE(result_index[i] + start[i], 0);
operand_index[i] = result_index[i] + start[i];
}
auto* src = operand_base + (element_byte_size *
IndexUtil::MultidimensionalIndexToLinearIndex(
operand_literal.shape(), operand_index));
std::memcpy(dest, src, element_byte_size);
return true;
};
TF_RETURN_IF_ERROR(result.PopulateInplace(func));
evaluated_[dynamic_slice] = std::move(result);
return absl::OkStatus();
}
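// Per dynamic-update-slice semantics, start indices are clamped so that the
// update always lies inside the operand; the update is then written into a
// clone of the operand literal.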
absl::Status HloEvaluator::HandleDynamicUpdateSlice(const HloInstruction* dus) {
auto operand = dus->operand(0);
auto update = dus->operand(1);
auto start_indices = dus->operand(2);
auto result_shape = dus->shape();
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferDynamicUpdateSliceShape(
operand->shape(), update->shape(),
Cast<HloDynamicUpdateSliceInstruction>(dus)->index_shapes()));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape is set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
TF_RET_CHECK(
primitive_util::IsIntegralType(start_indices->shape().element_type()));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, operand->shape()));
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const Literal& update_literal = GetEvaluatedLiteralFor(update);
auto result = operand_literal.Clone();
const auto rank = result.shape().rank();
std::vector<int64_t> start =
GetS64Indices(absl::MakeConstSpan(dus->operands()).subspan(2));
for (int64_t i = 0; i < rank; ++i) {
start[i] = std::min<int64_t>(
std::max<int64_t>(0, start[i]),
result.shape().dimensions(i) - update_literal.shape().dimensions(i));
}
std::vector<int64_t> result_index(rank, 0);
auto func = [&](absl::Span<const int64_t> update_index) {
std::transform(update_index.begin(), update_index.end(), start.begin(),
result_index.begin(), std::plus<int64_t>());
result.CopyElementFrom(update_literal, update_index, result_index);
return true;
};
std::vector<int64_t> base(update_literal.shape().dimensions_size(), 0);
std::vector<int64_t> step(update_literal.shape().dimensions_size(), 1);
ShapeUtil::ForEachIndexNoStatus(update_literal.shape(), base,
update_literal.shape().dimensions(), step,
func);
evaluated_[dus] = std::move(result);
return absl::OkStatus();
}
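// Handles only the scalar-predicate form of select here; the element-wise
// form is left to DefaultAction.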
absl::Status HloEvaluator::HandleSelect(const HloInstruction* select) {
const auto& pred = GetEvaluatedLiteralFor(select->operand(0));
const auto& on_true = GetEvaluatedLiteralFor(select->operand(1));
const auto& on_false = GetEvaluatedLiteralFor(select->operand(2));
if (ShapeUtil::IsScalar(pred.shape())) {
if (pred.Get<bool>({})) {
evaluated_[select] = on_true.Clone();
} else {
evaluated_[select] = on_false.Clone();
}
return absl::OkStatus();
}
return DefaultAction(select);
}
namespace {
absl::StatusOr<Literal> CreateScalarLiteral(int64_t value,
PrimitiveType element_type) {
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
return LiteralUtil::CreateR0(
static_cast<NativeTypeOf<primitive_type_constant>>(value));
}
return InvalidArgument("Unsupported element type.");
},
element_type);
}
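// Attempts to recognize the while loop as a statically analyzable counted
// loop; on success, the induction variable's final value
// (init + trip_count * step) is computed directly and every other tuple
// element is reported as unknown.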
absl::StatusOr<Literal> TryParseAndEvaluateWhileInductionVar(
const HloInstruction* while_hlo) {
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_hlo, {});
if (!parsed_while_loop.has_value() || parsed_while_loop->is_dynamic()) {
return FailedPrecondition(
"Cannot evaluate a while loop's induction variable since the loop "
"does not match a known loop pattern or the loop is not static.");
}
int64_t induction_var_value =
parsed_while_loop->static_while_loop->induction_var_init_value +
parsed_while_loop->static_while_loop->trip_count *
parsed_while_loop->static_while_loop->step_size;
Shape result_shape = while_hlo->shape().tuple_shapes(
parsed_while_loop->static_while_loop->induction_var_index);
TF_ASSIGN_OR_RETURN(
Literal result,
CreateScalarLiteral(induction_var_value, result_shape.element_type()));
std::vector<Literal*> while_result_element_ptrs;
while_result_element_ptrs.reserve(while_hlo->shape().tuple_shapes_size());
std::vector<Literal> while_result_elements(
while_hlo->shape().tuple_shapes_size());
for (int i = 0; i < while_hlo->shape().tuple_shapes_size(); ++i) {
if (i == parsed_while_loop->static_while_loop->induction_var_index) {
while_result_element_ptrs.push_back(&result);
} else {
const Shape& shape = while_hlo->shape().tuple_shapes(i);
while_result_elements[i] =
Literal::CreateFromShapeWithUnknownLeafArrays(shape);
while_result_element_ptrs.push_back(&while_result_elements[i]);
}
}
return LiteralUtil::MakeTuple(while_result_element_ptrs);
}
}  // namespace
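// Evaluates the loop by alternately running the condition and body on the
// loop-carried value. When the initial value is not fully known, only a
// statically derivable induction variable (if requested through
// visitor_shape_index_) can be produced. If max_loop_iterations_ is exceeded,
// a pattern-matched static trip count is used as a last resort.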
absl::Status HloEvaluator::HandleWhile(const HloInstruction* while_hlo) {
const HloComputation* cond_comp = while_hlo->while_condition();
const HloComputation* body_comp = while_hlo->while_body();
auto lcv = GetEvaluatedLiteralFor(while_hlo->operand(0)).Clone();
if (!lcv.IsKnown()) {
    std::optional<ParsedWhileLoop> parsed_while_loop =
        PatternMatchParseWhileLoop(while_hlo, {});
evaluated_[while_hlo] =
Literal::CreateFromShapeWithUnknownLeafArrays(while_hlo->shape());
if (!parsed_while_loop.has_value() || parsed_while_loop->is_dynamic() ||
visitor_shape_index_.size() != 1 ||
parsed_while_loop->static_while_loop->induction_var_index !=
visitor_shape_index_[0]) {
return absl::OkStatus();
}
Shape induction_var_shape =
ShapeUtil::GetSubshape(while_hlo->shape(), visitor_shape_index_);
int64_t trip_count = parsed_while_loop->static_while_loop->trip_count;
TF_ASSIGN_OR_RETURN(
Literal induction_var_val,
CreateScalarLiteral(trip_count, induction_var_shape.element_type()));
    TF_RETURN_IF_ERROR(evaluated_[while_hlo].CopyFrom(
        induction_var_val, /*dest_shape_index=*/visitor_shape_index_,
        /*src_shape_index=*/{}));
return absl::OkStatus();
}
bool keep_going = true;
int64_t iteration_count = 0;
std::unique_ptr<HloEvaluator> cond_evaluator =
CreateEmbedded(max_loop_iterations_);
cond_evaluator->set_dynamic_dimension_inference(dynamic_dimension_inference_);
std::unique_ptr<HloEvaluator> loop_body_evaluator =
CreateEmbedded(max_loop_iterations_);
loop_body_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
while (keep_going) {
if (max_loop_iterations_ >= 0 && iteration_count++ > max_loop_iterations_) {
absl::StatusOr<Literal> result =
TryParseAndEvaluateWhileInductionVar(while_hlo);
if (result.ok()) {
lcv = std::move(result).value();
break;
} else {
return InvalidArgument("Loop %s exceeded loop iteration limit (%d).",
while_hlo->name(), max_loop_iterations_);
}
}
TF_ASSIGN_OR_RETURN(auto cond_val,
cond_evaluator->Evaluate(*cond_comp, {&lcv}));
keep_going = cond_val.GetFirstElement<bool>();
if (keep_going) {
TF_ASSIGN_OR_RETURN(auto body_val,
loop_body_evaluator->Evaluate(*body_comp, {&lcv}));
VLOG(3) << "Loop iteration result: " << body_val.ToString();
lcv = std::move(body_val);
cond_evaluator->ResetVisitStates();
loop_body_evaluator->ResetVisitStates();
}
}
evaluated_[while_hlo] = std::move(lcv);
return absl::OkStatus();
}
namespace {
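// Gathers the elements of a rank-1 literal at the given index positions into
// a new rank-1 literal of the same element type.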
template <typename NativeT>
Literal ExtractLiteralFromIndexPositions(const Literal& from,
absl::Span<int64_t const> indices) {
absl::InlinedVector<NativeT, 10> values;
for (int64_t index : indices) {
values.push_back(from.Get<NativeT>({index}));
}
return LiteralUtil::CreateR1<NativeT>(values);
}
absl::StatusOr<Literal> ExtractFromIndexPositions(
const Literal& from, absl::Span<int64_t const> indices) {
PrimitiveType type = from.shape().element_type();
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
return ExtractLiteralFromIndexPositions<
NativeTypeOf<primitive_type_constant>>(from, indices);
}
return InvalidArgument("Unsupported type for Sort: %s",
PrimitiveType_Name(type));
},
type);
}
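// Invokes f at every valid base index covered by one window position,
// honoring stride, window dilation, base dilation, and low padding; positions
// that fall outside the base shape or between base-dilation points are
// skipped.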
void IterateThroughWindow(
const Shape& window_shape, const Window& window, const Shape& base_shape,
const absl::Span<const int64_t> window_count_index,
const std::function<void(absl::Span<const int64_t>)>& f) {
const int64_t rank = base_shape.rank();
DimensionVector window_index(rank);
std::fill(window_index.begin(), window_index.end(), 0);
do {
DimensionVector base_index(rank);
bool out_of_bound = false;
for (int64_t i = 0; i < rank; ++i) {
base_index[i] = window_count_index[i] * window.dimensions(i).stride() +
window_index[i] * window.dimensions(i).window_dilation() -
window.dimensions(i).padding_low();
if (base_index[i] % window.dimensions(i).base_dilation() != 0) {
out_of_bound = true;
break;
}
base_index[i] /= window.dimensions(i).base_dilation();
if (base_index[i] < 0 || base_index[i] >= base_shape.dimensions(i)) {
out_of_bound = true;
break;
}
}
if (!out_of_bound) {
f(base_index);
}
} while (IndexUtil::BumpIndices(window_shape, absl::MakeSpan(window_index)));
}
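// Stochastic rounding kernel: rounds `operand` to one of its two neighboring
// integers, choosing the one farther from zero with probability equal to the
// fractional part, using `random` as a fixed-point uniform sample. NaN maps
// to 0 and out-of-range values saturate at the integral type's limits.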
template <typename Fp, typename Uint, typename ResultT>
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
std::function<ResultT(Fp, Uint)> stochastic_convert_op =
[](Fp operand, Uint random) -> ResultT {
bool is_negative = static_cast<bool>(Eigen::numext::signbit(operand));
if (Eigen::numext::isinf(operand)) {
return is_negative ? std::numeric_limits<ResultT>::min()
: std::numeric_limits<ResultT>::max();
}
if (Eigen::numext::isnan(operand)) {
return static_cast<ResultT>(0);
}
if (operand >= static_cast<Fp>(std::numeric_limits<ResultT>::max())) {
return std::numeric_limits<ResultT>::max();
}
if (operand <= static_cast<Fp>(std::numeric_limits<ResultT>::min())) {
return std::numeric_limits<ResultT>::min();
}
operand = Eigen::numext::abs(operand);
auto truncated = static_cast<ResultT>(operand);
Fp fractional = operand - static_cast<Fp>(truncated);
if (fractional == Fp{0}) {
return is_negative ? -truncated : truncated;
}
auto fixed_fractional = static_cast<Uint>(std::ldexp(
static_cast<double>(fractional), std::numeric_limits<Uint>::digits));
if (random < fixed_fractional) {
if (truncated == std::numeric_limits<ResultT>::max()) {
return std::numeric_limits<ResultT>::min();
}
truncated++;
}
return is_negative ? -truncated : truncated;
};
Literal result(result_shape);
TF_RETURN_IF_ERROR(
result.Populate<ResultT>([&](absl::Span<const int64_t> multi_index) {
return stochastic_convert_op(operand_literal.Get<Fp>(multi_index),
random_literal.Get<Uint>(multi_index));
}));
return std::move(result);
}
template <PrimitiveType operand_type, PrimitiveType random_type,
PrimitiveType result_type>
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
return StochasticConvertOp<
typename primitive_util::PrimitiveTypeToNative<operand_type>::type,
typename primitive_util::PrimitiveTypeToNative<random_type>::type,
typename primitive_util::PrimitiveTypeToNative<result_type>::type>(
operand_literal, random_literal, result_shape);
}
template <PrimitiveType operand_type, PrimitiveType random_type>
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsSignedIntegralType(
primitive_type_constant)) {
return StochasticConvertOp<operand_type, random_type,
primitive_type_constant>(
operand_literal, random_literal, result_shape);
}
return Unimplemented(
"Stochastically converting from type %s to type %s is not "
"implemented.",
PrimitiveType_Name(operand_literal.shape().element_type()),
PrimitiveType_Name(result_shape.element_type()));
},
result_shape.element_type());
}
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
return StochasticConvertOp<
primitive_type_constant,
primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(primitive_type_constant))>(
operand_literal, random_literal, result_shape);
}
return Unimplemented(
"Stochastically converting from type %s to type %s is not "
"implemented.",
PrimitiveType_Name(operand_literal.shape().element_type()),
PrimitiveType_Name(result_shape.element_type()));
},
operand_literal.shape().element_type());
}
}  // namespace
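// Reverse is a parallel element copy in which the index is mirrored along
// each reversed dimension.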
absl::Status HloEvaluator::HandleReverse(const HloInstruction* reverse) {
const Shape& result_shape = reverse->shape();
const auto reverse_dimensions = reverse->dimensions();
auto operand = reverse->operand(0);
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferReverseShape(operand->shape(), reverse_dimensions));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
Literal result(result_shape);
const size_t element_byte_size =
primitive_util::ByteWidth(result_shape.element_type());
auto* operand_base = static_cast<const char*>(operand_literal.untyped_data());
TF_RETURN_IF_ERROR(result.PopulateInplaceParallel(
[&](void* dest, absl::Span<const int64_t> out_index, int) {
std::vector<int64_t> from_index(out_index.begin(), out_index.end());
for (const int64_t dim : reverse_dimensions) {
from_index[dim] = result_shape.dimensions(dim) - 1 - out_index[dim];
}
auto* src =
operand_base +
(element_byte_size * IndexUtil::MultidimensionalIndexToLinearIndex(
operand_literal.shape(), from_index));
std::memcpy(dest, src, element_byte_size);
}));
evaluated_[reverse] = std::move(result);
return absl::OkStatus();
}
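// For each source element: the select computation scans the corresponding
// operand window to pick a single position, then the scatter computation
// combines the source value into the result at that position.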
absl::Status HloEvaluator::HandleSelectAndScatter(
const HloInstruction* select_and_scatter) {
auto operand = select_and_scatter->operand(0);
auto source = select_and_scatter->operand(1);
const Window& window = select_and_scatter->window();
const Literal& init_literal =
GetEvaluatedLiteralFor(select_and_scatter->operand(2));
TF_RET_CHECK(ShapeUtil::IsScalar(init_literal.shape()));
TF_ASSIGN_OR_RETURN(Literal result,
init_literal.Broadcast(select_and_scatter->shape(), {}));
std::vector<int64_t> window_dimension_sizes;
for (const auto& window_dimension : window.dimensions()) {
window_dimension_sizes.push_back(window_dimension.size());
}
const Shape window_shape = ShapeUtil::MakeShape(
operand->shape().element_type(), window_dimension_sizes);
const HloComputation* select = select_and_scatter->select();
const HloComputation* scatter = select_and_scatter->scatter();
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const Literal& source_literal = GetEvaluatedLiteralFor(source);
int64_t rank = operand_literal.shape().rank();
HloEvaluator embedded_evaluator(max_loop_iterations_);
DimensionVector source_index(rank, 0);
do {
std::optional<Literal> selected_val;
std::optional<DimensionVector> selected_index;
IterateThroughWindow(
window_shape, window, operand_literal.shape(), source_index,
[&](absl::Span<const int64_t> operand_index) {
auto curr_val =
LiteralUtil::GetScalarLiteral(operand_literal, operand_index);
if (!selected_val.has_value()) {
selected_val.emplace(curr_val.Clone());
selected_index.emplace(operand_index.begin(), operand_index.end());
}
Literal computed_result =
embedded_evaluator
.Evaluate(*select, {&selected_val.value(), &curr_val})
.value();
bool selected = !computed_result.Get<bool>({});
if (selected) {
*selected_val = std::move(curr_val);
selected_index.emplace(operand_index.begin(), operand_index.end());
}
embedded_evaluator.ResetVisitStates();
});
IterateThroughWindow(
window_shape, window, operand_literal.shape(), source_index,
[&](absl::Span<const int64_t> operand_index) {
if (std::equal(operand_index.begin(), operand_index.end(),
selected_index->begin())) {
auto source =
LiteralUtil::GetScalarLiteral(source_literal, source_index);
auto scattered =
LiteralUtil::GetScalarLiteral(result, operand_index);
Literal computed_result =
embedded_evaluator.Evaluate(*scatter, {&source, &scattered})
.value();
LiteralUtil::SetScalarLiteral(result, operand_index,
computed_result);
embedded_evaluator.ResetVisitStates();
}
});
} while (
IndexUtil::BumpIndices(source->shape(), absl::MakeSpan(source_index)));
evaluated_[select_and_scatter] = std::move(result);
return absl::OkStatus();
}
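// Slice is a parallel element copy using start + index * stride addressing
// into the operand literal.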
absl::Status HloEvaluator::HandleSlice(const HloInstruction* slice) {
auto operand = slice->operand(0);
const Shape& shape = slice->shape();
TF_ASSIGN_OR_RETURN(auto inferred_return_shape,
ShapeInference::InferSliceShape(
operand->shape(), slice->slice_starts(),
slice->slice_limits(), slice->slice_strides()));
TF_RET_CHECK(ShapeUtil::Compatible(shape, inferred_return_shape))
<< "return shape set to: " << ShapeUtil::HumanString(shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
const int64_t rank = operand->shape().rank();
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const size_t element_byte_size =
primitive_util::ByteWidth(shape.element_type());
auto* operand_base = static_cast<const char*>(operand_literal.untyped_data());
auto func = [&](void* dest, absl::Span<const int64_t> out_index, int) {
DimensionVector operand_index(rank);
for (int64_t i = 0; i < rank; ++i) {
operand_index[i] =
slice->slice_starts(i) + out_index[i] * slice->slice_strides(i);
}
auto* src = operand_base + (element_byte_size *
IndexUtil::MultidimensionalIndexToLinearIndex(
operand_literal.shape(), operand_index));
std::memcpy(dest, src, element_byte_size);
};
Literal result(shape);
TF_RETURN_IF_ERROR(result.PopulateInplaceParallel(func));
evaluated_[slice] = std::move(result);
return absl::OkStatus();
}
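// Sort orders positions along the sort dimension by evaluating the
// comparator computation on scalar pairs, using a merge sort (parallelized
// across threads for long runs, binary-insertion sort for runs of fewer than
// nine elements), then permutes every operand by the sorted indices.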
absl::Status HloEvaluator::HandleSort(const HloInstruction* sort) {
TF_RET_CHECK(sort->operand_count() >= 1)
<< "Expected at least 1 operand for sort";
for (int64_t i = 1; i < sort->operand_count(); ++i) {
TF_RET_CHECK(ShapeUtil::SameDimensions(sort->operand(0)->shape(),
sort->operand(i)->shape()))
<< "All Sort operands must have the same dimensions";
}
if (VLOG_IS_ON(3)) {
for (int64_t i = 0; i < sort->operand_count(); ++i) {
VLOG(3) << "HandleSort operand " << i << " literal: "
<< GetEvaluatedLiteralFor(sort->operand(i)).ToString();
}
}
Shape key_shape = sort->operand(0)->shape();
auto rank = key_shape.rank();
std::vector<Literal> result_literals;
result_literals.reserve(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
result_literals.emplace_back(sort->operand(i)->shape());
}
std::vector<int64_t> zero_base(rank, 0);
std::vector<int64_t> increment(rank, 1);
int64_t sort_dim = sort->dimensions(0);
int64_t sort_dim_elements = key_shape.dimensions(sort_dim);
TF_RET_CHECK(sort_dim >= 0 && sort_dim < increment.size())
<< "Unexpected out-of-bound sort dimension " << sort_dim
<< " accessing increment of size " << increment.size();
increment[sort_dim] = sort_dim_elements;
auto comparator =
[sort](absl::Span<const Literal> literals_to_sort, int64_t a, int64_t b,
HloEvaluator* embedded_evaluator) -> absl::StatusOr<bool> {
absl::InlinedVector<Literal, 8> literals;
literals.reserve(2 * sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
literals.push_back(
LiteralUtil::GetScalarLiteral(literals_to_sort[i], {a}));
literals.push_back(
LiteralUtil::GetScalarLiteral(literals_to_sort[i], {b}));
}
absl::InlinedVector<const Literal*, 8> literal_ptrs;
absl::c_transform(literals, std::back_inserter(literal_ptrs),
[](const Literal& literal) { return &literal; });
TF_ASSIGN_OR_RETURN(
auto computed_result,
embedded_evaluator->Evaluate(*sort->to_apply(), literal_ptrs));
embedded_evaluator->ResetVisitStates();
return computed_result.Get<bool>({});
};
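  // In debug builds, additionally checks that the comparator is irreflexive
  // and asymmetric, i.e. behaves like a strict ordering.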
auto less_than =
[&comparator](absl::Span<const Literal> literals_to_sort, int64_t a,
int64_t b,
HloEvaluator* embedded_evaluator) -> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(bool a_is_smaller,
comparator(literals_to_sort, a, b, embedded_evaluator));
#ifndef NDEBUG
TF_ASSIGN_OR_RETURN(bool b_is_smaller,
comparator(literals_to_sort, b, a, embedded_evaluator));
TF_RET_CHECK(!(b_is_smaller && a_is_smaller));
TF_ASSIGN_OR_RETURN(bool b_is_reflexive,
comparator(literals_to_sort, b, b, embedded_evaluator));
TF_RET_CHECK(!b_is_reflexive);
TF_ASSIGN_OR_RETURN(bool a_is_reflexive,
comparator(literals_to_sort, a, a, embedded_evaluator));
TF_RET_CHECK(!a_is_reflexive);
#endif
return a_is_smaller;
};
std::function<absl::Status(absl::Span<const Literal>, absl::Span<int64_t>,
absl::Span<int64_t>, absl::Span<int64_t>,
std::vector<int64_t>&, HloEvaluator*)>
merge = [&](absl::Span<const Literal> literals_to_sort,
absl::Span<int64_t> lhs, absl::Span<int64_t> rhs,
absl::Span<int64_t> output, std::vector<int64_t>& tmp,
HloEvaluator* embedded_evaluator) -> absl::Status {
tmp.clear();
tmp.reserve(output.size());
while (!lhs.empty() && !rhs.empty()) {
TF_ASSIGN_OR_RETURN(bool rhs_is_smaller,
less_than(literals_to_sort, rhs.front(), lhs.front(),
embedded_evaluator));
if (rhs_is_smaller) {
tmp.push_back(rhs.front());
rhs.remove_prefix(1);
} else {
tmp.push_back(lhs.front());
lhs.remove_prefix(1);
}
}
absl::c_copy(lhs, std::back_inserter(tmp));
absl::c_copy(rhs, std::back_inserter(tmp));
absl::c_copy(tmp, output.begin());
return absl::OkStatus();
};
auto* env = tsl::Env::Default();
const int max_parallelism = tsl::port::MaxParallelism();
constexpr size_t kMinElementsPerThread{1024};
const size_t useful_parallelism = std::min<size_t>(
sort_dim_elements / kMinElementsPerThread, max_parallelism);
const size_t work_per_thread = useful_parallelism > 1
? sort_dim_elements / useful_parallelism
: std::numeric_limits<size_t>::max();
std::function<absl::Status(absl::Span<const Literal>, absl::Span<int64_t>,
std::vector<int64_t>*, HloEvaluator*)>
mergesort = [&merge, &mergesort, &less_than, this, env, work_per_thread](
absl::Span<const Literal> literals_to_sort,
absl::Span<int64_t> to_sort,
std::vector<int64_t>* scratch,
HloEvaluator* embedded_evaluator) -> absl::Status {
if (to_sort.size() < 2) {
return absl::OkStatus();
}
size_t halfway = to_sort.size() / 2;
auto lhs = to_sort.subspan(0, halfway);
auto rhs = to_sort.subspan(halfway);
std::unique_ptr<HloEvaluator> thread_local_embedded_evaluator;
if (embedded_evaluator == nullptr) {
thread_local_embedded_evaluator = CreateEmbedded(max_loop_iterations_);
embedded_evaluator = thread_local_embedded_evaluator.get();
}
constexpr size_t kMinElementsForMergesort{9};
if (to_sort.size() >= kMinElementsForMergesort) {
std::unique_ptr<std::vector<int64_t>> thread_local_scratch;
if (!scratch) {
thread_local_scratch = std::make_unique<std::vector<int64_t>>();
scratch = thread_local_scratch.get();
}
absl::Status lhs_status;
if (to_sort.size() >= work_per_thread) {
std::unique_ptr<tsl::Thread> thread = absl::WrapUnique(env->StartThread(
tsl::ThreadOptions(), "XLA_mergesort",
[literals_to_sort, lhs, &mergesort, &lhs_status] {
lhs_status = mergesort(literals_to_sort, lhs, nullptr, nullptr);
}));
TF_RETURN_IF_ERROR(
mergesort(literals_to_sort, rhs, scratch, embedded_evaluator));
thread.reset();
} else {
TF_RETURN_IF_ERROR(
mergesort(literals_to_sort, rhs, scratch, embedded_evaluator));
lhs_status =
mergesort(literals_to_sort, lhs, scratch, embedded_evaluator);
}
TF_RETURN_IF_ERROR(lhs_status);
TF_RETURN_IF_ERROR(merge(literals_to_sort, lhs, rhs, to_sort, *scratch,
embedded_evaluator));
} else {
for (auto i = to_sort.begin(); i != to_sort.end(); ++i) {
auto len = i - to_sort.begin();
auto ub = to_sort.begin();
auto needle = *i;
while (len != 0) {
auto half_len = len / 2;
auto midpoint = ub + half_len;
TF_ASSIGN_OR_RETURN(bool is_smaller,
less_than(literals_to_sort, needle, *midpoint,
embedded_evaluator));
if (is_smaller) {
len = half_len;
} else {
ub = midpoint + 1;
len -= half_len + 1;
}
}
std::rotate(ub, i, i + 1);
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
key_shape, zero_base, key_shape.dimensions(), increment,
[&](absl::Span<const int64_t> indices) -> absl::StatusOr<bool> {
std::vector<int64_t> limit_indices(indices.begin(), indices.end());
absl::c_for_each(limit_indices, [](int64_t& index) { ++index; });
limit_indices[sort_dim] = sort_dim_elements;
std::vector<Literal> literals_to_sort;
literals_to_sort.reserve(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
TF_ASSIGN_OR_RETURN(auto literal_to_sort,
GetEvaluatedLiteralFor(sort->operand(i))
.Slice(indices, limit_indices)
.Reshape({sort_dim_elements}));
literals_to_sort.push_back(std::move(literal_to_sort));
}
std::vector<int64_t> indices_to_sort(sort_dim_elements);
std::iota(indices_to_sort.begin(), indices_to_sort.end(), 0);
TF_RETURN_IF_ERROR(mergesort(literals_to_sort,
absl::MakeSpan(indices_to_sort), nullptr,
nullptr));
std::vector<int64_t> slice_dimensions(rank, 1);
slice_dimensions[sort_dim] = sort_dim_elements;
std::vector<int64_t> start_indices(rank, 0);
for (int64_t i = 0; i < sort->operand_count(); ++i) {
TF_ASSIGN_OR_RETURN(
Literal sorted_literal,
ExtractFromIndexPositions(literals_to_sort[i], indices_to_sort));
TF_ASSIGN_OR_RETURN(auto sorted_literal_reshaped,
sorted_literal.Reshape(slice_dimensions));
TF_RETURN_IF_ERROR(result_literals[i].CopySliceFrom(
sorted_literal_reshaped, start_indices, indices,
slice_dimensions));
}
return true;
}));
if (sort->operand_count() == 1) {
evaluated_[sort] = std::move(result_literals[0]);
} else {
std::vector<const Literal*> literal_ptrs;
absl::c_transform(result_literals, std::back_inserter(literal_ptrs),
[](const Literal& literal) { return &literal; });
Literal result_tuple = LiteralUtil::MakeTuple(literal_ptrs);
VLOG(3) << "HandleSort result_tuple: " << result_tuple.ToString();
evaluated_[sort] = std::move(result_tuple);
}
return absl::OkStatus();
}
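// Validates shape agreement and dispatches to the StochasticConvertOp
// overloads above, which switch on the operand and result element types.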
absl::Status HloEvaluator::HandleStochasticConvert(
const HloInstruction* stochastic_convert) {
const HloInstruction* operand = stochastic_convert->operand(0);
const HloInstruction* random = stochastic_convert->operand(1);
const Shape& result_shape = stochastic_convert->shape();
TF_RET_CHECK(ShapeUtil::SameDimensions(operand->shape(), random->shape()));
TF_RET_CHECK(ShapeUtil::SameDimensions(operand->shape(), result_shape));
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const Literal& random_literal = GetEvaluatedLiteralFor(random);
TF_ASSIGN_OR_RETURN(
evaluated_[stochastic_convert],
StochasticConvertOp(operand_literal, random_literal, result_shape));
return absl::OkStatus();
}
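// Returns true if the computation is a two-parameter scalar add, which makes
// a reduce eligible for the fast summation path below.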
static bool IsScalarAdd(HloComputation* computation) {
HloInstruction* instruction = computation->root_instruction();
if (instruction->opcode() == HloOpcode::kAdd &&
computation->num_parameters() == 2) {
const HloInstruction* lhs = instruction->operand(0);
const HloInstruction* rhs = instruction->operand(1);
return lhs->opcode() == HloOpcode::kParameter &&
ShapeUtil::IsScalar(lhs->shape()) &&
rhs->opcode() == HloOpcode::kParameter &&
ShapeUtil::IsScalar(rhs->shape()) && lhs != rhs;
}
return false;
}
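// Applies the reduction computation once, folding the input element at
// input_index into the accumulators held in results[...] at output_index.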
static absl::StatusOr<bool> PerformReductionStep(
bool is_tuple, absl::Span<const int64_t> input_index,
absl::Span<const int64_t> output_index,
absl::Span<const Literal* const> input_args, absl::Span<Literal> results,
HloComputation* computation, HloEvaluator* embedded_evaluator) {
int num_args = results.size();
absl::InlinedVector<Literal, 1> arg_values;
arg_values.reserve(num_args);
absl::InlinedVector<Literal, 1> accumulators;
accumulators.reserve(num_args);
for (int64_t i = 0; i < num_args; ++i) {
arg_values.emplace_back(
ShapeUtil::MakeShape(input_args[i]->shape().element_type(), {}));
accumulators.emplace_back(
ShapeUtil::MakeShape(input_args[i]->shape().element_type(), {}));
arg_values[i].CopyElementFrom(*input_args[i], input_index, {});
accumulators[i].CopyElementFrom(results[i], output_index, {});
}
absl::InlinedVector<Literal*, 2> embedded_operands;
for (Literal& accumulator : accumulators) {
embedded_operands.push_back(&accumulator);
}
for (Literal& local_input : arg_values) {
embedded_operands.push_back(&local_input);
}
TF_ASSIGN_OR_RETURN(
Literal computed_result,
embedded_evaluator->Evaluate(*computation, embedded_operands));
embedded_evaluator->ResetVisitStates();
if (is_tuple) {
std::vector<Literal> computed_results = computed_result.DecomposeTuple();
for (int64_t i = 0; i < num_args; ++i) {
results[i].CopyElementFrom(computed_results[i], {}, output_index);
}
} else {
results[0].CopyElementFrom(computed_result, {}, output_index);
}
return true;
}
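// Produces one output element of a reduce. When the fast path is enabled,
// floating-point reductions whose computation is a scalar add accumulate
// sums over chunks of 512 linearized indices; all other reductions evaluate
// the computation once per input element.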
static absl::StatusOr<bool> GenerateReduceOutputElement(
bool is_tuple, bool use_fast_path, absl::Span<const int64_t> output_index,
absl::Span<const Literal* const> init_values,
absl::Span<const Literal* const> input_args, absl::Span<Literal> results,
HloComputation* function, HloEvaluator* embedded_evaluator,
absl::Span<const int64_t> arg_dim_steps,
absl::Span<const int64_t> arg_dim_counts,
absl::Span<const int64_t> result_to_arg_index) {
bool use_fast_add = use_fast_path &&
ShapeUtil::ElementIsFloating(init_values[0]->shape()) &&
IsScalarAdd(function) && !is_tuple;
const Shape& arg_shape = input_args[0]->shape();
absl::Span<const int64_t> arg_dimensions = arg_shape.dimensions();
std::vector<int64_t> base(arg_dimensions.size());
for (int64_t i = 0; i < output_index.size(); ++i) {
base[result_to_arg_index[i]] = output_index[i];
}
for (int64_t i = 0; i < results.size(); ++i) {
results[i].CopyElementFrom(*init_values[i], {}, output_index);
}
if (use_fast_add) {
double computed_result = *init_values[0]->GetAsDouble({});
const Literal* input_arg0 = input_args[0];
const Shape& shape = input_arg0->shape();
absl::Span<const int64_t> minor_to_major = LayoutUtil::MinorToMajor(shape);
static constexpr int kChunkSize = 512;
int64_t linear_indices[kChunkSize];
int n_linear_indices = 0;
auto reduction_step = [&](absl::Span<const int64_t> input_index) -> bool {
linear_indices[n_linear_indices++] =
IndexUtil::MultidimensionalIndexToLinearIndex(shape, minor_to_major,
input_index);
if (n_linear_indices == kChunkSize) {
computed_result += *input_arg0->GetSumAsDouble(
absl::MakeConstSpan(linear_indices, n_linear_indices));
n_linear_indices = 0;
}
return true;
};
ShapeUtil::ForEachIndexNoStatus(arg_shape, base, arg_dim_counts,
arg_dim_steps, reduction_step);
if (n_linear_indices > 0) {
computed_result += *input_arg0->GetSumAsDouble(
absl::MakeConstSpan(linear_indices, n_linear_indices));
}
TF_RETURN_IF_ERROR(results[0].SetFromDouble(output_index, computed_result));
return true;
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
arg_shape, base, arg_dim_counts, arg_dim_steps,
[&](absl::Span<const int64_t> input_index) {
return PerformReductionStep(is_tuple, input_index, output_index,
input_args, results, function,
embedded_evaluator);
}));
return true;
}
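// Reduce is evaluated in parallel over output elements, with one embedded
// evaluator per worker thread (plus one for the calling thread).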
absl::Status HloEvaluator::HandleReduce(const HloInstruction* hlo) {
const HloReduceInstruction* reduce = Cast<HloReduceInstruction>(hlo);
int64_t num_args = reduce->inputs().size();
absl::Span<const int64_t> dimensions_to_reduce(reduce->dimensions());
HloComputation* function = reduce->to_apply();
absl::InlinedVector<const Shape*, 1> operand_shapes;
for (const HloInstruction* operand : reduce->operands()) {
operand_shapes.push_back(&operand->shape());
}
TF_ASSIGN_OR_RETURN(auto inferred_return_shape,
ShapeInference::InferReduceShape(
operand_shapes, dimensions_to_reduce,
function->ComputeProgramShape()));
TF_RET_CHECK(ShapeUtil::CompatibleIgnoringFpPrecision(reduce->shape(),
inferred_return_shape))
<< "return shape is set to: " << ShapeUtil::HumanString(reduce->shape())
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
absl::InlinedVector<const Literal*, 1> input_args(num_args);
absl::InlinedVector<const Literal*, 1> init_values(num_args);
for (int64_t i = 0; i < num_args; ++i) {
input_args[i] = &GetEvaluatedLiteralFor(reduce->inputs()[i]);
VLOG(3) << "HandleReduce arg_literal: " << input_args[i]->ToString();
init_values[i] = &GetEvaluatedLiteralFor(reduce->init_values()[i]);
VLOG(3) << "HandleReduce init_literal: " << init_values[i]->ToString();
TF_RET_CHECK(ShapeUtil::IsScalar(init_values[i]->shape()));
}
const Shape& arg_shape = input_args[0]->shape();
const Shape& out_shape = inferred_return_shape;
bool is_tuple = out_shape.IsTuple();
const Shape& output_shape = inferred_return_shape.IsTuple()
? inferred_return_shape.tuple_shapes(0)
: inferred_return_shape;
absl::Span<const int64_t> arg_dimensions = arg_shape.dimensions();
std::vector<int64_t> arg_dim_steps(arg_dimensions.size());
std::vector<int64_t> arg_dim_counts(arg_dimensions.size());
for (const int64_t dim : dimensions_to_reduce) {
arg_dim_steps[dim] = 1;
arg_dim_counts[dim] = arg_dimensions[dim];
}
std::vector<int64_t> result_to_arg_index;
for (int64_t i = 0; i < arg_dimensions.size(); ++i) {
if (arg_dim_steps[i] == 0) {
result_to_arg_index.push_back(i);
}
}
const int num_threads = ShapeUtil::GetForEachIndexParallelThreadCount() + 1;
std::vector<std::unique_ptr<HloEvaluator>> embedded_evaluators;
embedded_evaluators.reserve(num_threads);
for (int i = 0; i < num_threads; ++i) {
embedded_evaluators.push_back(CreateEmbedded(max_loop_iterations_));
}
absl::InlinedVector<Literal, 1> results(num_args);
for (int64_t i = 0; i < num_args; ++i) {
results[i] = Literal(is_tuple ? out_shape.tuple_shapes(i) : out_shape);
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexParallelWithStatus(
output_shape, [&](absl::Span<const int64_t> output_index, int thread_id) {
return GenerateReduceOutputElement(
is_tuple, use_fast_path_reduce_, output_index, init_values,
input_args, absl::Span<Literal>(results), function,
embedded_evaluators[thread_id + 1].get(), arg_dim_steps,
arg_dim_counts, result_to_arg_index);
}));
if (is_tuple) {
Literal tuple_result(inferred_return_shape);
for (int64_t i = 0; i < num_args; ++i) {
TF_CHECK_OK(tuple_result.MoveFrom(std::move(results[i]), {i}));
}
evaluated_[reduce] = std::move(tuple_result);
} else {
CHECK_EQ(results.size(), 1);
evaluated_[reduce] = std::move(results[0]);
}
if (!ShapeUtil::Compatible(reduce->shape(), inferred_return_shape)) {
TF_ASSIGN_OR_RETURN(evaluated_[reduce],
evaluated_[reduce].ConvertToShape(reduce->shape()));
}
return absl::OkStatus();
}
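// Reduce-window folds every window position into per-operand accumulators via
// the reduction computation, evaluated in parallel over output elements; the
// variadic form assembles its result tuple from per-operand literals.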
absl::Status HloEvaluator::HandleReduceWindow(const HloInstruction* hlo) {
auto* reduce_window = Cast<HloReduceWindowInstruction>(hlo);
const Window& window = reduce_window->window();
HloComputation* function = reduce_window->to_apply();
TF_ASSIGN_OR_RETURN(auto inferred_return_shape,
ShapeInference::InferReduceWindowShape(
reduce_window->input_shapes(),
reduce_window->init_value_shapes(), window,
function->ComputeProgramShape()));
TF_RET_CHECK(
ShapeUtil::Compatible(reduce_window->shape(), inferred_return_shape))
<< "return shape is set to: "
<< ShapeUtil::HumanStringWithLayout(reduce_window->shape())
<< " but is inferred to be: "
<< ShapeUtil::HumanStringWithLayout(inferred_return_shape);
absl::InlinedVector<const Literal*, 2> input_literal_vec, init_literal_vec;
auto input_arrays = reduce_window->inputs();
auto init_values = reduce_window->init_values();
int64_t num_args = input_arrays.size();
for (int i = 0; i < num_args; ++i) {
const Literal& input_literal = GetEvaluatedLiteralFor(input_arrays[i]);
VLOG(3) << "HandleReduceWindow arg_literal: " << input_literal.ToString();
input_literal_vec.push_back(&input_literal);
const Literal& init_literal = GetEvaluatedLiteralFor(init_values[i]);
VLOG(3) << "HandleReduceWindow init_literal: " << init_literal.ToString();
TF_RET_CHECK(ShapeUtil::IsScalar(init_literal.shape()));
init_literal_vec.push_back(&init_literal);
}
absl::InlinedVector<int64_t, 2> window_dimension_sizes;
for (const auto& window_dimension : window.dimensions()) {
window_dimension_sizes.push_back(window_dimension.size());
}
const Shape window_shape = ShapeUtil::MakeShape(
input_arrays[0]->shape().element_type(), window_dimension_sizes);
const int num_threads = ShapeUtil::GetForEachIndexParallelThreadCount() + 1;
std::vector<std::unique_ptr<HloEvaluator>> embedded_evaluators;
embedded_evaluators.reserve(num_threads);
for (int i = 0; i < num_threads; ++i) {
embedded_evaluators.push_back(CreateEmbedded(max_loop_iterations_));
}
auto evaluate_impl = [&init_literal_vec, &window_shape, &window,
&input_literal_vec, &embedded_evaluators, function,
&inferred_return_shape](
absl::Span<const int64_t> output_index,
int thread_id) -> absl::InlinedVector<Literal, 2> {
const int embedded_evaluator_index = thread_id + 1;
CHECK_GE(embedded_evaluator_index, 0);
CHECK_LT(embedded_evaluator_index, embedded_evaluators.size());
HloEvaluator& embedded_evaluator =
*embedded_evaluators[embedded_evaluator_index];
absl::InlinedVector<Literal, 2> computed_result;
computed_result.reserve(init_literal_vec.size());
for (const auto* init : init_literal_vec) {
computed_result.push_back(init->Clone());
}
IterateThroughWindow(
window_shape, window, input_literal_vec[0]->shape(), output_index,
[&](absl::Span<const int64_t> operand_index) -> void {
absl::InlinedVector<const Literal*, 2> args;
for (auto& curr_result_val : computed_result) {
VLOG(2) << "Pushing:" << curr_result_val.ToString() << "\n";
args.push_back(&curr_result_val);
}
absl::InlinedVector<Literal, 2> curr_val_literal_vec;
curr_val_literal_vec.reserve(input_literal_vec.size());
for (const auto* input_literal : input_literal_vec) {
curr_val_literal_vec.push_back(Literal(ShapeUtil::MakeShape(
input_literal->shape().element_type(), {})));
curr_val_literal_vec.back().CopyElementFrom(*input_literal,
operand_index, {});
VLOG(2) << "Pushing:" << curr_val_literal_vec.back().ToString()
<< "\n";
args.push_back(&curr_val_literal_vec.back());
}
computed_result[0] =
embedded_evaluator.Evaluate(*function, args).value();
VLOG(2) << "Computed result:" << computed_result[0].ToString()
<< "\n";
embedded_evaluator.ResetVisitStates();
if (inferred_return_shape.IsTuple()) {
auto decomposed = computed_result[0].DecomposeTuple();
computed_result.clear();
computed_result.reserve(decomposed.size());
for (int i = 0; i < decomposed.size(); ++i) {
computed_result.push_back(std::move(decomposed[i]));
}
}
});
VLOG(2) << "Final result size:" << computed_result.size() << "\n";
for (const auto& res : computed_result) {
VLOG(2) << res.ToString() << "\n";
}
return computed_result;
};
Literal result(inferred_return_shape);
if (inferred_return_shape.IsTuple()) {
absl::InlinedVector<Literal, 1> results(num_args);
for (int64_t i = 0; i < num_args; ++i) {
results[i] = Literal(inferred_return_shape.tuple_shapes(i));
}
ShapeUtil::ForEachIndexParallel(
inferred_return_shape.tuple_shapes(0),
[&results, &evaluate_impl](absl::Span<const int64_t> output_index,
int thread_id) -> bool {
absl::InlinedVector<Literal, 2> computed_result_vec =
evaluate_impl(output_index, thread_id);
for (int i = 0; i < computed_result_vec.size(); ++i) {
results[i].CopyElementFrom(computed_result_vec[i], {},
output_index);
}
return true;
});
result = Literal::MoveIntoTuple(absl::MakeSpan(results));
VLOG(2) << "Final result is:" << result.ToString() << "\n";
} else {
TF_RETURN_IF_ERROR(Apply<PopulateParallelImpl>(
result, [&evaluate_impl](absl::Span<const int64_t> output_index,
int thread_id) {
return std::move(evaluate_impl(output_index, thread_id)[0]);
}));
}
VLOG(2) << "Final result is:" << result.ToString() << "\n";
evaluated_[reduce_window] = std::move(result);
return absl::OkStatus();
}
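// Map evaluates the mapped computation once per output element on the
// corresponding scalar elements of the operands.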
absl::Status HloEvaluator::HandleMap(const HloInstruction* map) {
auto operands = map->operands();
const HloComputation* computation = map->to_apply();
Literal result(map->shape());
HloEvaluator embedded_evaluator(max_loop_iterations_);
TF_RETURN_IF_ERROR(
Apply<PopulateImpl>(result, [&](absl::Span<const int64_t> multi_index) {
std::vector<Literal> arg_literals;
arg_literals.reserve(operands.size());
for (auto operand : operands) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(
LiteralUtil::GetScalarLiteral(arg_literal, multi_index));
}
Literal computed_result =
embedded_evaluator.Evaluate(*computation, arg_literals).value();
embedded_evaluator.ResetVisitStates();
return computed_result;
}));
evaluated_[map] = std::move(result);
return absl::OkStatus();
}
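// Custom-calls are only evaluable through a registered handler; without one,
// control falls through to DefaultAction.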
absl::Status HloEvaluator::HandleCustomCall(const HloInstruction* custom_call) {
if (!custom_call_handler_) {
return DefaultAction(custom_call);
}
std::vector<const Literal*> operands;
operands.reserve(custom_call->operand_count());
for (const HloInstruction* operand : custom_call->operands()) {
operands.push_back(&GetEvaluatedLiteralFor(operand));
}
TF_ASSIGN_OR_RETURN(
auto output, custom_call_handler_(custom_call, absl::MakeSpan(operands)));
evaluated_[custom_call] = std::move(output);
return absl::OkStatus();
}
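// Unless partial evaluation is enabled, every operand must already have a
// fully known evaluated literal before the instruction is visited.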
absl::Status HloEvaluator::Preprocess(const HloInstruction* hlo) {
VLOG(3) << "About to visit HLO: " << hlo->ToString();
if (!enable_partial_evaluation_) {
for (const HloInstruction* operand : hlo->operands()) {
if (!IsAlreadyEvaluated(operand) ||
!GetEvaluatedLiteralFor(operand).IsKnown()) {
return tsl::errors::FailedPrecondition(
"Failed to evaluate instruction since its operands are unknown "
"or undetermined and partial evaluation is not enabled.");
}
}
}
return ShapeUtil::ValidateShape(hlo->shape());
}
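// After each visit, relayouts the evaluated literal if its layout disagrees
// with the (possibly defaulted) layout of the instruction's shape.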
absl::Status HloEvaluator::Postprocess(const HloInstruction* hlo) {
VLOG(3) << "Finished visiting " << hlo->ToString()
<< "; evaluated value is: " << GetEvaluatedLiteralFor(hlo).ToString();
auto evaluated_shape = GetEvaluatedLiteralFor(hlo).shape();
xla::Shape hlo_shape = hlo->shape();
if (hlo_shape.IsArray() && !hlo_shape.has_layout()) {
*hlo_shape.mutable_layout() =
LayoutUtil::GetDefaultLayoutForShape(hlo_shape);
}
if (evaluated_shape.has_layout() && hlo_shape.has_layout() &&
!Layout::Equal().MinorToMajorOnly()(evaluated_shape.layout(),
hlo_shape.layout())) {
evaluated_.at(hlo) = evaluated_.at(hlo).Relayout(hlo_shape);
}
return absl::OkStatus();
}
namespace {
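// Shared driver for the single-threaded Eigen matmul kernels. The kernels
// compute in column-major order, so the row-major inputs are passed with
// lhs/rhs and m/n swapped, which yields the row-major product directly.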
template <typename T>
std::unique_ptr<Array2D<T>> MatmulArray2DImpl(
const Array2D<T>& lhs, const Array2D<T>& rhs,
const std::function<void(const void* run_options_ptr, T* out, T* lhs,
T* rhs, int64_t m, int64_t n, int64_t k,
int32_t transpose_lhs, int32_t transpose_rhs)>&
impl_fn) {
CHECK_EQ(lhs.width(), rhs.height());
int m = lhs.height();
int n = rhs.width();
int k = lhs.width();
auto result = std::make_unique<Array2D<T>>(m, n);
  impl_fn(
      /*run_options_ptr=*/nullptr, result->data(), rhs.data(), lhs.data(), n,
      m, k,
      /*transpose_lhs=*/0,
      /*transpose_rhs=*/0);
return result;
}
}  // namespace
std::unique_ptr<Array2D<Eigen::half>> HloEvaluator::MatmulArray2D(
const Array2D<Eigen::half>& lhs, const Array2D<Eigen::half>& rhs) {
return MatmulArray2DImpl<Eigen::half>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF16);
}
std::unique_ptr<Array2D<float>> HloEvaluator::MatmulArray2D(
const Array2D<float>& lhs, const Array2D<float>& rhs) {
return MatmulArray2DImpl<float>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF32);
}
std::unique_ptr<Array2D<double>> HloEvaluator::MatmulArray2D(
const Array2D<double>& lhs, const Array2D<double>& rhs) {
return MatmulArray2DImpl<double>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF64);
}
std::unique_ptr<Array2D<std::complex<float>>> HloEvaluator::MatmulArray2D(
const Array2D<std::complex<float>>& lhs,
const Array2D<std::complex<float>>& rhs) {
return MatmulArray2DImpl<std::complex<float>>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulC64);
}
std::unique_ptr<Array2D<std::complex<double>>> HloEvaluator::MatmulArray2D(
const Array2D<std::complex<double>>& lhs,
const Array2D<std::complex<double>>& rhs) {
return MatmulArray2DImpl<std::complex<double>>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulC128);
}
std::unique_ptr<Array2D<int32_t>> HloEvaluator::MatmulArray2D(
const Array2D<int32_t>& lhs, const Array2D<int32_t>& rhs) {
return MatmulArray2DImpl<int32_t>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulS32);
}
std::unique_ptr<Array2D<uint8_t>> HloEvaluator::MatmulArray2D(
const Array2D<uint8_t>& lhs, const Array2D<uint8_t>& rhs) {
return MatmulArray2DImpl<uint8_t>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulU8);
}
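// FP8 support: the conversion helpers below widen f8 operands to f32 element
// by element, so the f8 matmuls can be emulated with the f32 kernel and the
// result narrowed back, instead of relying on the dedicated f8 kernels.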
std::unique_ptr<Array2D<float>> Array2DF8E5M2ToF32(
const Array2D<tsl::float8_e5m2>& input) {
auto result = std::make_unique<Array2D<float>>(input.height(), input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) = static_cast<float>(input(rowno, colno));
}
}
return result;
}
std::unique_ptr<Array2D<float>> Array2DF8E4M3FNToF32(
const Array2D<tsl::float8_e4m3fn>& input) {
auto result = std::make_unique<Array2D<float>>(input.height(), input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) = static_cast<float>(input(rowno, colno));
}
}
return result;
}
std::unique_ptr<Array2D<tsl::float8_e5m2>> Array2DF32ToF8E5M2(
const Array2D<float>& input) {
auto result = std::make_unique<Array2D<tsl::float8_e5m2>>(input.height(),
input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) =
static_cast<tsl::float8_e5m2>(input(rowno, colno));
}
}
return result;
}
std::unique_ptr<Array2D<tsl::float8_e4m3fn>> Array2DF32ToF8E4M3FN(
const Array2D<float>& input) {
auto result = std::make_unique<Array2D<tsl::float8_e4m3fn>>(input.height(),
input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) =
static_cast<tsl::float8_e4m3fn>(input(rowno, colno));
}
}
return result;
}
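// When true (the default), f8 matmuls take the promote-to-f32 path above;
// flipping it would exercise the dedicated f8 Eigen kernels instead.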
static bool promote_f8_to_f32 = true;
std::unique_ptr<Array2D<tsl::float8_e5m2>> HloEvaluator::MatmulArray2D(
const Array2D<tsl::float8_e5m2>& lhs,
const Array2D<tsl::float8_e5m2>& rhs) {
if (promote_f8_to_f32) {
auto lhs_float = Array2DF8E5M2ToF32(lhs);
auto rhs_float = Array2DF8E5M2ToF32(rhs);
auto result = MatmulArray2D(*lhs_float, *rhs_float);
return Array2DF32ToF8E5M2(*result);
} else {
return MatmulArray2DImpl<tsl::float8_e5m2>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF8E5M2);
}
}
std::unique_ptr<Array2D<tsl::float8_e4m3fn>> HloEvaluator::MatmulArray2D(
const Array2D<tsl::float8_e4m3fn>& lhs,
const Array2D<tsl::float8_e4m3fn>& rhs) {
if (promote_f8_to_f32) {
auto lhs_float = Array2DF8E4M3FNToF32(lhs);
auto rhs_float = Array2DF8E4M3FNToF32(rhs);
auto result = MatmulArray2D(*lhs_float, *rhs_float);
return Array2DF32ToF8E4M3FN(*result);
} else {
return MatmulArray2DImpl<tsl::float8_e4m3fn>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF8E4M3FN);
}
}
} | #include "xla/hlo/evaluator/hlo_evaluator.h"
#include <array>
#include <complex>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/client/xla_builder.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/hlo_element_type_converter.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
static std::array<bool, 2> use_bf16_params{true, false};
class HloEvaluatorTest : public HloTestBase {
public:
HloEvaluatorTest() : use_bfloat16_(false) { InitializeFftData(); }
absl::StatusOr<Literal> Evaluate(
absl::Span<const Literal* const> arg_literals = {}) {
if (use_bfloat16_) {
HloElementTypeConverter(F32, BF16).Run(m_.get()).value();
}
return evaluator_.Evaluate(*m_->entry_computation(), arg_literals);
}
Literal EvaluateWithModule(
HloModule* module, absl::Span<const Literal* const> arg_literals = {}) {
if (use_bfloat16_) {
HloElementTypeConverter(F32, BF16).Run(m_.get()).value();
}
return evaluator_.Evaluate(*module->entry_computation(), arg_literals)
.value();
}
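  // Builds a one-instruction computation applying `opcode` to `input` and
  // checks the result against `expected`. `aabs` is the absolute error
  // tolerance used for F32/F64 outputs; all other types are compared exactly.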
void TestUnaryOp(HloOpcode opcode, Literal expected, Literal input,
float aabs = 0) {
HloComputation::Builder b(TestName());
auto c1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
b.AddInstruction(HloInstruction::CreateUnary(expected.shape(), opcode, c1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto element_type = expected.shape().element_type();
if (element_type == F32 || element_type == F64) {
ErrorSpec error(aabs);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, error));
} else {
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
}
void TestBinaryOp(HloOpcode opcode, Literal expected, Literal lhs,
Literal rhs) {
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs)));
b.AddInstruction(
HloInstruction::CreateBinary(expected.shape(), opcode, c1, c2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestTernaryOp(HloOpcode opcode, Literal expected, Literal src0,
Literal src1, Literal src2) {
HloComputation::Builder b(TestName());
auto operand0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src0)));
auto operand1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src1)));
auto operand2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src2)));
b.AddInstruction(HloInstruction::CreateTernary(
expected.shape(), opcode, operand0, operand1, operand2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestEvaluateInstruction(HloInstruction* instruction,
const Literal& expected) {
TF_ASSERT_OK_AND_ASSIGN(Literal result, evaluator_.Evaluate(instruction));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestEvaluationFailure(HloInstruction* instruction) {
absl::StatusOr<Literal> result = evaluator_.Evaluate(instruction);
EXPECT_TRUE(!result.ok());
}
void TestRecursivelyEvaluateInstruction(HloInstruction* instruction,
const Literal& expected) {
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
        evaluator_.Evaluate(
            instruction, {},
            /*recursively_evaluate_nonconstant_operands=*/true));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestRecursiveEvaluationFailure(HloInstruction* instruction) {
absl::StatusOr<Literal> result =
        evaluator_.Evaluate(
            instruction, {},
            /*recursively_evaluate_nonconstant_operands=*/true);
EXPECT_TRUE(!result.ok());
}
std::unique_ptr<HloComputation> MaxComputationScalarF32() {
HloComputation::Builder max_computation("max");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = max_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = max_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
max_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMaximum, param_lhs, param_rhs));
return max_computation.Build();
}
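  // Reduces a 4x4 iota array with the scalar max computation over a square
  // window described by the given size/padding/stride/dilation parameters and
  // compares against `expected`; shared by the ReduceWindow tests.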
void ReduceWindowMaxIotaTest(int window_size, int padding, int stride,
int window_dilation, int base_dilation,
const Literal& expected) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(4, 4);
arg_array->FillIota(0);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
auto max_func = m_->AddEmbeddedComputation(MaxComputationScalarF32());
Window window;
WindowDimension dim;
dim.set_size(window_size);
dim.set_stride(stride);
dim.set_padding_low(padding);
dim.set_padding_high(padding);
dim.set_window_dilation(window_dilation);
dim.set_base_dilation(base_dilation);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
int dim0 = expected.shape().dimensions(0);
int dim1 = expected.shape().dimensions(1);
Shape shape = ShapeUtil::MakeShape(F32, {dim0, dim1});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, max_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
protected:
explicit HloEvaluatorTest(bool use_bfloat16) : use_bfloat16_(use_bfloat16) {
InitializeFftData();
}
void InitializeFftData();
HloEvaluator evaluator_;
const bool use_bfloat16_;
std::unique_ptr<HloModule> m_ = CreateNewVerifiedModule();
ErrorSpec fft_error_ = ErrorSpec(1e-4, 1e-5);
Literal fft_c64x2x4x8_;
Literal fft_c64x2x4x8_1d_;
Literal fft_c64x2x4x8_2d_;
Literal fft_c64x2x4x8_3d_;
};
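// Runs each parameterized test twice: once in plain F32 and once with the F32
// graph converted to BF16 first, so both precisions go through the same
// evaluator code paths.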
class HloEvaluatorBf16Test : public ::testing::WithParamInterface<bool>,
public HloEvaluatorTest {
protected:
HloEvaluatorBf16Test() : HloEvaluatorTest(GetParam()) {}
};
INSTANTIATE_TEST_SUITE_P(HloEvaluatorTest_Instantiation, HloEvaluatorBf16Test,
::testing::ValuesIn(use_bf16_params));
TEST_P(HloEvaluatorBf16Test, DoesClamp) {
auto low = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
auto value = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
auto high = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
Shape shape = low.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{0, 4}, {2, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
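// The operands below have more significant bits than a double's 53-bit
// mantissa can hold, so this test would catch a clamp implementation that
// silently round-trips s64 values through floating point.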
TEST_P(HloEvaluatorBf16Test, DoesClampInt64) {
auto ones = [](int bits) { return (int64_t{1} << bits) - 1; };
auto low =
LiteralUtil::CreateR2<int64_t>({{0, ones(54)}, {ones(54), ones(58)}});
auto value = LiteralUtil::CreateR2<int64_t>({{0, ones(56)}, {0, ones(58)}});
auto high = LiteralUtil::CreateR2<int64_t>(
{{ones(54), ones(55)}, {ones(56), ones(58)}});
Shape shape = low.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected =
LiteralUtil::CreateR2<int64_t>({{0, ones(55)}, {ones(54), ones(58)}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DISABLED_DoesClampSpecialBroadcast) {
auto low = LiteralUtil::CreateR0<float>(0.f);
auto value = LiteralUtil::CreateR2<float>({{-1.f, 0.f}, {1.f, 2.f}});
auto high = LiteralUtil::CreateR0<float>(1.f);
Shape shape = value.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {1, 1}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DoesSelect) {
auto pred = LiteralUtil::CreateR2<bool>({{true, false}, {false, true}});
auto on_true = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
auto on_false = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
Shape shape = on_true.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(pred)));
auto c2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(on_true)));
auto c3 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(on_false)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kSelect, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
auto expected = LiteralUtil::CreateR2<float>({{2, 5}, {0, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesAdd) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-96, 8}});
TestBinaryOp(HloOpcode::kAdd, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_P(HloEvaluatorBf16Test, DoesAnd) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{0, 0}, {4, 4}});
TestBinaryOp(HloOpcode::kAnd, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesOr) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-100, 4}});
TestBinaryOp(HloOpcode::kOr, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesXor) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-104, 0}});
TestBinaryOp(HloOpcode::kXor, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesMultiply) {
auto lhs = LiteralUtil::CreateR2<int32_t>({{-1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int32_t>(
{{std::numeric_limits<int32_t>::min(), 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int32_t>(
{{std::numeric_limits<int32_t>::min(), 0}, {-400, 16}});
TestBinaryOp(HloOpcode::kMultiply, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesDivideInt64) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{0, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesClampS64) {
auto low = LiteralUtil::CreateR1<int64_t>(
{-8616761059752331528LL, 6780561065411491190LL, -8616761059752331528LL});
auto value = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491190LL, 6780561065411491180LL, 4241131823772864090LL});
auto high = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491180LL, 8616761059752331528LL, 3832151243857508051LL});
auto expected = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491190LL, 6780561065411491190LL, 3832151243857508051LL});
TestTernaryOp(HloOpcode::kClamp, std::move(expected), std::move(low),
std::move(value), std::move(high));
}
TEST_P(HloEvaluatorBf16Test, DoesDivideDouble) {
auto lhs = LiteralUtil::CreateR2<double>({{1.0, 0.0}, {-100.0, 4.0}});
auto rhs = LiteralUtil::CreateR2<double>({{2.2, 4.0}, {4.0, 4.0}});
auto expected =
LiteralUtil::CreateR2<double>({{0.45454545454545453, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesAbsR2) {
auto operand = LiteralUtil::CreateR2<int64_t>({{1, -20}, {-100, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{1, 20}, {100, 4}});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesAbsR0) {
auto operand = LiteralUtil::CreateR0<float>(-1.0f);
auto expected = LiteralUtil::CreateR0<float>(1.0f);
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesAbsR1WithZeroSize) {
auto operand = LiteralUtil::CreateR1<float>({});
auto expected = LiteralUtil::CreateR1<float>({});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_F(HloEvaluatorTest, DoesAbsC128) {
auto x = LiteralUtil::CreateR0<complex128>({1, 2});
auto expected_real = LiteralUtil::CreateR0<double>(2.23607);
TestUnaryOp(HloOpcode::kAbs, std::move(expected_real), std::move(x), 3e-06);
}
TEST_F(HloEvaluatorTest, DoesNegateR2) {
auto operand = LiteralUtil::CreateR2<int32_t>(
{{0, std::numeric_limits<int32_t>::min()}, {-1, 4}});
auto expected = LiteralUtil::CreateR2<int32_t>(
{{0, std::numeric_limits<int>::min()}, {1, -4}});
TestUnaryOp(HloOpcode::kNegate, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesCosR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{1, -1}, {-1, 1}});
TestUnaryOp(HloOpcode::kCos, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorBf16Test, DoesSinR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}});
TestUnaryOp(HloOpcode::kSin, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorBf16Test, DoesTanR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}});
TestUnaryOp(HloOpcode::kTan, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_F(HloEvaluatorTest, DoesNotR2) {
auto operand =
LiteralUtil::CreateR2<int32_t>({{0, std::numeric_limits<int>::min()},
{-1, std::numeric_limits<int>::max()}});
auto expected =
LiteralUtil::CreateR2<int32_t>({{-1, std::numeric_limits<int>::max()},
{0, std::numeric_limits<int>::min()}});
TestUnaryOp(HloOpcode::kNot, std::move(expected), std::move(operand));
}
TEST_F(HloEvaluatorTest, DoesRealC128) {
auto x = LiteralUtil::CreateR1<complex128>({{1, 0}, {-100, 4}});
auto expected_real = LiteralUtil::CreateR1<double>({1, -100});
TestUnaryOp(HloOpcode::kReal, std::move(expected_real), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesImagC128) {
auto x = LiteralUtil::CreateR1<complex128>({{1, 0}, {-100, 4}});
auto expected_imag = LiteralUtil::CreateR1<double>({0, 4});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_P(HloEvaluatorBf16Test, DoesImagF32AndBf16) {
auto x = LiteralUtil::CreateR1<float>({1, -100});
auto expected_imag = LiteralUtil::CreateR1<float>({0, 0});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesImagF64) {
auto x = LiteralUtil::CreateR1<double>({1, -100});
auto expected_imag = LiteralUtil::CreateR1<double>({0, 0});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesTraverseInstructions) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto rhs2 = LiteralUtil::CreateR2<int64_t>({{1, -20}, {-100, 4}});
std::vector<const Literal*> args = {&lhs, &rhs, &rhs2};
Shape shape = ShapeUtil::MakeShape(S64, {2, 2});
HloComputation::Builder b(TestName());
auto param_lhs =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "lhs"));
auto param_rhs =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "rhs"));
auto lhs_instruction = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto param_rhs2 =
b.AddInstruction(HloInstruction::CreateParameter(2, shape, "rhs2"));
b.AddInstruction(HloInstruction::CreateBinary(shape, HloOpcode::kAdd,
lhs_instruction, param_rhs2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate(args));
auto expected = LiteralUtil::CreateR2<int64_t>({{4, -16}, {-196, 12}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesReshape) {
HloComputation::Builder b(TestName());
const int64_t dimensions[] = {11, 8, 7, 5, 9};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
auto literal_clone = literal.Clone();
HloInstruction* literal_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(literal)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});
const int64_t permutation[] = {1, 2, 0, 4, 3};
b.AddInstruction(
HloInstruction::CreateTranspose(shape, literal_instruction, permutation));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
result.EachCell<NativeT>(
[&](absl::Span<const int64_t> indices, NativeT value) {
std::vector<int64_t> rindexes = PermuteInverse(indices, permutation);
EXPECT_NEAR(value, literal_clone.Get<NativeT>(rindexes), 0.031250);
});
}
TEST_F(HloEvaluatorTest, DoesBroadcast) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}, {5, 6}});
auto output_literal = LiteralUtil::CreateR3<int32_t>(
{{{1, 2}, {3, 4}, {5, 6}}, {{1, 2}, {3, 4}, {5, 6}}});
HloInstruction* literal_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateBroadcast(
output_literal.shape(), literal_instruction, {1, 2}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, output_literal));
}
TEST_F(HloEvaluatorTest, DoesBroadcastScalar) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR0<int32_t>(111);
auto output_literal = LiteralUtil::CreateR2<int32_t>(
{{111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}});
HloInstruction* literal_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
  b.AddInstruction(HloInstruction::CreateBroadcast(
      output_literal.shape(), literal_instruction,
      /*broadcast_dimensions=*/{}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, output_literal));
}
TEST_F(HloEvaluatorTest, DoesConcatenateSimple) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int64_t>({{-1, -2}, {100, 200}})));
HloInstruction* operand2 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int64_t>({{-2, -3}, {-100, -200}})));
std::vector<HloInstruction*> operands = {operand1, operand2};
Shape shape = ShapeUtil::MakeShape(S64, {4, 2});
b.AddInstruction(HloInstruction::CreateConcatenate(shape, operands, 0));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<int64_t>(
{{-1, -2}, {100, 200}, {-2, -3}, {-100, -200}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, ConcatenateHandlesShapeWithZeroElement) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int64_t>({100, 200})));
HloInstruction* operand2 = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64_t>({})));
std::vector<HloInstruction*> operands = {operand1, operand2};
Shape shape = ShapeUtil::MakeShape(S64, {2});
b.AddInstruction(HloInstruction::CreateConcatenate(shape, operands, 0));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR1<int64_t>({100, 200});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, ConvertWithSameLayout) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}, {5, 6}});
auto expected =
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
ASSERT_TRUE(LayoutUtil::LayoutsInShapesEqual(input_literal.shape(),
expected.shape()));
HloInstruction* constant = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateConvert(expected.shape(), constant));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_P(HloEvaluatorBf16Test, ConvertWithDifferentLayout) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2}, {3, 4}, {5, 6}}, LayoutUtil::MakeLayout({0, 1}));
auto expected = LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, LayoutUtil::MakeLayout({1, 0}));
ASSERT_FALSE(LayoutUtil::LayoutsInShapesEqual(input_literal.shape(),
expected.shape()));
HloInstruction* constant = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateConvert(expected.shape(), constant));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
PaddingConfig CreatePaddingConfig(
std::initializer_list<std::array<int64_t, 3>> padding_dimensions) {
PaddingConfig padding_config;
for (auto& paddings_per_dim : padding_dimensions) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(paddings_per_dim[0]);
dimension->set_edge_padding_high(paddings_per_dim[1]);
dimension->set_interior_padding(paddings_per_dim[2]);
}
return padding_config;
}
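// Example: CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}}) requests, per
// dimension, {edge_padding_low, edge_padding_high, interior_padding}: one low
// edge element plus two interior elements between originals on dim 0, and two
// high edge elements plus one interior element on dim 1.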
TEST_F(HloEvaluatorTest, Pad2DIntegerArrayWithZeroDimension) {
auto operand = LiteralUtil::CreateR2<int32_t>({{}, {}});
HloComputation::Builder b(TestName());
auto operand_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(operand)));
constexpr int32_t kPadValue = 10;
auto pad_value = LiteralUtil::CreateR0<int32_t>(kPadValue);
auto padding_value_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
auto padding_config = CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}});
Shape shape = ShapeUtil::MakeShape(S32, {5, 2});
b.AddInstruction(HloInstruction::CreatePad(
shape, operand_instruction, padding_value_instruction, padding_config));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<int32_t>(
{{10, 10}, {10, 10}, {10, 10}, {10, 10}, {10, 10}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Pad4DFloatArrayWithInteriorPadding) {
HloComputation::Builder b(TestName());
Array4D<float> input_array(3, 2, 1, 1, {1, 2, 3, 4, 5, 6});
auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
constexpr float kPadValue = 1.5;
auto pad_value = LiteralUtil::CreateR0<float>(kPadValue);
HloInstruction* pad_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 5, 1, 1});
auto r4_padding_on_dim0_dim1 =
CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}, {{0, 0, 0}}, {{0, 0, 0}}});
b.AddInstruction(HloInstruction::CreatePad(
shape, input_instruction, pad_instruction, r4_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array4D<float>>(8, 5, 1, 1);
expected_array->Fill(kPadValue);
(*expected_array)(1, 0, 0, 0) = 1.0f;
(*expected_array)(1, 2, 0, 0) = 2.0f;
(*expected_array)(4, 0, 0, 0) = 3.0f;
(*expected_array)(4, 2, 0, 0) = 4.0f;
(*expected_array)(7, 0, 0, 0) = 5.0f;
(*expected_array)(7, 2, 0, 0) = 6.0f;
auto expected = LiteralUtil::CreateR4FromArray4D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, NegativePadding2D) {
HloComputation::Builder b(TestName());
auto input_array = std::make_unique<Array2D<float>>(4, 3);
input_array->FillUnique(1.0f);
auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
auto pad_value_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.718f)));
auto r2_padding_on_dim0_dim1 =
CreatePaddingConfig({{{-1, -2, 0}}, {{-2, 4, 0}}});
Shape shape = ShapeUtil::MakeShape(F32, {1, 5});
b.AddInstruction(HloInstruction::CreatePad(shape, input_instruction,
pad_value_instruction,
r2_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array2D<float>>(1, 5);
(*expected_array)(0, 0) = 7.0f;
(*expected_array)(0, 1) = 2.718f;
(*expected_array)(0, 2) = 2.718f;
(*expected_array)(0, 3) = 2.718f;
(*expected_array)(0, 4) = 2.718f;
auto expected = LiteralUtil::CreateR2FromArray2D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(0.031250)));
}
TEST_P(HloEvaluatorBf16Test, NegativeAndInteriorPadding2D) {
HloComputation::Builder b(TestName());
auto input_array = std::make_unique<Array2D<float>>(4, 3);
input_array->FillUnique(1.0f);
auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
auto pad_value_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.718f)));
PaddingConfig padding_config = MakeNoPaddingConfig(2);
auto r2_padding_on_dim0_dim1 =
CreatePaddingConfig({{{-2, -5, 1}}, {{-2, 4, 2}}});
Shape shape = ShapeUtil::MakeShape(F32, {0, 9});
b.AddInstruction(HloInstruction::CreatePad(shape, input_instruction,
pad_value_instruction,
r2_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array2D<float>>(0, 9);
auto expected = LiteralUtil::CreateR2FromArray2D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, Pad2DFloatArrayDifferentTypes) {
HloComputation::Builder b(TestName());
b.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(BF16, {5, 2}),
b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<bfloat16>({{}, {}}))),
b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(10.0f))),
CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}})));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
bfloat16 bf16_c(10.0f);
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<bfloat16>({{bf16_c, bf16_c},
{bf16_c, bf16_c},
{bf16_c, bf16_c},
{bf16_c, bf16_c},
{bf16_c, bf16_c}}),
result));
}
TEST_P(HloEvaluatorBf16Test, DotRank2AndRank1) {
HloComputation::Builder b(TestName());
auto lhs_array = std::make_unique<Array2D<float>>(4, 1);
lhs_array->FillUnique(1.0f);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_literal = LiteralUtil::CreateR2<float>({{1, 2}});
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {4, 2});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = Array2D<float>({
{1.f, 2.f},
{2.f, 4.f},
{3.f, 6.f},
{4.f, 8.f},
});
auto expected = LiteralUtil::CreateR2FromArray2D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DotRank1AndRank2) {
HloComputation::Builder b(TestName());
auto lhs_literal = LiteralUtil::CreateR1<float>({1, 2, 3});
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_array = std::make_unique<Array2D<float>>(3, 2);
rhs_array->FillUnique(1.0f);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {2});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(0);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR1<float>({22.f, 28.f});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DotRank2AndRank2) {
HloComputation::Builder b(TestName());
auto lhs_array = std::make_unique<Array2D<float>>(4, 3);
lhs_array->FillUnique(1.0f);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_array = std::make_unique<Array2D<float>>(3, 2);
rhs_array->FillUnique(1.0f);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {4, 2});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
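  // Hand check, assuming FillUnique(1.0f) fills row-major increments: lhs row
  // 0 is {1, 2, 3} and rhs columns are {1, 3, 5} and {2, 4, 6}, so
  // expected(0, 0) = 1*1 + 2*3 + 3*5 = 22 and expected(0, 1) = 28.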
auto expected_array = Array2D<float>({
{22.f, 28.f},
{58.f, 76.f},
{94.f, 124.f},
{130.f, 172.f},
});
auto expected = LiteralUtil::CreateR2FromArray2D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DotRank4AndRank4) {
HloComputation::Builder b(TestName());
auto lhs_array = std::make_unique<Array4D<float>>(2, 2, 3, 1);
lhs_array->FillIota(1.0f);
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_array = std::make_unique<Array4D<float>>(2, 2, 3, 1);
rhs_array->FillIota(2.0f);
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 1, 1});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_batch_dimensions(0);
dot_dnums.add_rhs_batch_dimensions(0);
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_lhs_contracting_dimensions(2);
dot_dnums.add_rhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(2);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
float expected_1 = 0;
for (float i = 1.0f; i < 7.0f; ++i) {
expected_1 += i * i + i;
}
float expected_2 = 0;
for (float i = 7.0f; i < 13.0f; ++i) {
expected_2 += i * i + i;
}
auto expected_array = Array3D<float>({{{expected_1}}, {{expected_2}}});
auto expected = LiteralUtil::CreateR3FromArray3D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, SimpleConv1D) {
HloComputation::Builder b(TestName());
Array3D<float> lhs_array = {{{1, 2, 3}}};
auto lhs_literal = LiteralUtil::CreateR3FromArray3D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array3D<float> rhs_array = {{{3.f, 4.f}}};
auto rhs_literal = LiteralUtil::CreateR3FromArray3D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.set_input_feature_dimension(1);
dnums.set_output_feature_dimension(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(1);
dnums.add_kernel_spatial_dimensions(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 3});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
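  // Hand check: input {1, 2, 3}, kernel {3, 4}, stride 1, one element of
  // (zero) high padding: {1*3 + 2*4, 2*3 + 3*4, 3*3 + 0*4} = {11, 18, 9}.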
Array3D<float> expected_array = {{{11.f, 18.f, 9.f}}};
auto expected = LiteralUtil::CreateR3FromArray3D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Simple4x4Conv2DWith2x2Kernel) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 4, 4});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
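  // Hand check of the top-left output: window {{1, 2}, {5, 6}} against kernel
  // {{5, 6}, {7, 8}} gives 1*5 + 2*6 + 5*7 + 6*8 = 100; the last row and
  // column read zero high padding, e.g. 4*5 + 8*7 = 76 at the top right.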
Array4D<float> expected_array(1, 1, 4, 4);
expected_array.FillWithYX(Array2D<float>({
{100, 126, 152, 76},
{204, 230, 256, 124},
{308, 334, 360, 172},
{149, 160, 171, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Conv2DGeneralDimensionsReversed) {
HloComputation::Builder b(TestName());
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 7, 13},
{4, 10, 16}},
{{2, 8, 14},
{5, 11, 17}},
{{3, 9, 15},
{6, 12, 18}}
}});
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(weight);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
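  // Reverse the kernel along its spatial dimensions and set window_reversal
  // on both window dimensions below; the two reversals cancel, so the
  // expected values match the plain Conv2DGeneralDimensions test.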
rhs_instruction = b.AddInstruction(HloInstruction::CreateReverse(
rhs_instruction->shape(), rhs_instruction, {3, 1}));
Window window;
WindowDimension dim;
dim.set_size(3);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
dim.set_window_reversal(true);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(2);
dnums.set_output_batch_dimension(2);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.add_kernel_spatial_dimensions(1);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 2});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2688}}}});
auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Conv2DGeneralDimensions) {
HloComputation::Builder b(TestName());
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 7, 13},
{4, 10, 16}},
{{2, 8, 14},
{5, 11, 17}},
{{3, 9, 15},
{6, 12, 18}}
}});
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(weight);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(3);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(2);
dnums.set_output_batch_dimension(2);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.add_kernel_spatial_dimensions(1);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 2});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2688}}}});
auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DilatedBaseConv2DWithHighPadding) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(2);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 7, 7});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 7, 7);
expected_array.FillWithYX(Array2D<float>({
{5, 12, 10, 18, 15, 24, 20},
{35, 48, 42, 56, 49, 64, 56},
{25, 36, 30, 42, 35, 48, 40},
{63, 80, 70, 88, 77, 96, 84},
{45, 60, 50, 66, 55, 72, 60},
{91, 112, 98, 120, 105, 128, 112},
{65, 84, 70, 90, 75, 96, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DilatedBaseConv2DWithLowAndHighPadding) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(1);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(2);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 8, 8});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 8, 8);
expected_array.FillWithYX(Array2D<float>({
{8, 7, 16, 14, 24, 21, 32, 28},
{6, 5, 12, 10, 18, 15, 24, 20},
{40, 35, 48, 42, 56, 49, 64, 56},
{30, 25, 36, 30, 42, 35, 48, 40},
{72, 63, 80, 70, 88, 77, 96, 84},
{54, 45, 60, 50, 66, 55, 72, 60},
{104, 91, 112, 98, 120, 105, 128, 112},
{78, 65, 84, 70, 90, 75, 96, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test,
DilatedWindowAndBaseConv2DWithDifferentLowAndHighPaddingAndStrides) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 3);
rhs_array.FillWithYX(Array2D<float>({
{5, 6, 7},
{8, 9, 10},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(2);
dim.set_padding_high(2);
dim.set_window_dilation(2);
dim.set_base_dilation(2);
*window.add_dimensions() = dim;
dim.set_size(3);
dim.set_stride(3);
dim.set_padding_low(2);
dim.set_padding_high(-1);
dim.set_window_dilation(1);
dim.set_base_dilation(3);
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 9, 3});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 9, 3);
expected_array.FillWithYX(Array2D<float>({
{10, 20, 30},
{0, 0, 0},
{57, 74, 91},
{0, 0, 0},
{125, 142, 159},
{0, 0, 0},
{193, 210, 227},
{0, 0, 0},
{91, 98, 105},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Conv2DGroupedConvolution) {
HloComputation::Builder b(TestName());
std::vector<int64_t> input_dims = {1, 2, 2, 4};
std::vector<int64_t> filter_dims = {2, 2, 2, 8};
Shape input_shape = ShapeUtil::MakeShapeWithType<float>(input_dims);
Shape filter_shape = ShapeUtil::MakeShapeWithType<float>(filter_dims);
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
std::vector<float> input_elems(ShapeUtil::ElementsIn(input_shape));
std::iota(input_elems.begin(), input_elems.end(), -7);
auto input_r1 = LiteralUtil::CreateR1<float>(input_elems);
auto input_r4 = input_r1.Reshape(input_dims).value();
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input_r4)));
std::vector<float> filter_elems(ShapeUtil::ElementsIn(filter_shape));
std::iota(filter_elems.begin(), filter_elems.end(), -31);
auto filter_r1 = LiteralUtil::CreateR1<float>(filter_elems);
auto filter_r4 = filter_r1.Reshape(filter_dims).value();
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(filter_r4)));
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 8});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction,
      /*feature_group_count=*/2, /*batch_group_count=*/1, window, dnums,
      DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 1, 8);
expected_array.FillWithYX(
Array2D<float>({{668, 664, 660, 656, 668, 680, 692, 704}}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
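// Shared FFT fixtures: a c64[2, 4, 8] operand together with its expected 1D,
// 2D, and 3D transforms, compared by the tests below within the fft_error_
// tolerance.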
void HloEvaluatorTest::InitializeFftData() {
fft_c64x2x4x8_ = LiteralUtil::CreateR3<complex64>({
{{{0.0, 0.0}, {1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0},
{4.0, 0.0}, {5.0, 0.0}, {6.0, 0.0}, {7.0, 0.0}},
{{0.0, 0.0}, {0.0, 1.0}, {0.0, 2.0}, {0.0, 3.0},
{0.0, 4.0}, {0.0, 5.0}, {0.0, 6.0}, {0.0, 7.0}},
{{0.0, 7.0}, {1.0, 6.0}, {2.0, 5.0}, {3.0, 4.0},
{4.0, 3.0}, {5.0, 2.0}, {6.0, 1.0}, {7.0, 0.0}},
{{7.0, 0.0}, {6.0, 1.0}, {5.0, 2.0}, {4.0, 3.0},
{3.0, 4.0}, {2.0, 5.0}, {1.0, 6.0}, {0.0, 7.0}}},
{{{-4.0, 0.0}, {-3.0, 0.0}, {-2.0, 0.0}, {-1.0, 0.0},
{1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0}, {4.0, 0.0}},
{{0.0, -4.0}, {0.0, -3.0}, {0.0, -2.0}, {0.0, -1.0},
{0.0, 1.0}, {0.0, 2.0}, {0.0, 3.0}, {0.0, 4.0}},
{{3.5, 3.5}, {-1.707107, -0.707107}, {-1.0, -0.0}, {-0.707107, 0.292893},
{-0.5, 0.5}, {-0.292893, 0.707107}, {0.0, 1.0}, {0.707107, 1.707107}},
{{3.5, 3.5}, {1.707107, 0.707107}, {1.0, 0.0}, {0.707107, -0.292893},
{0.5, -0.5}, {0.292893, -0.707107}, {-0.0, -1.0}, {-0.707107, -1.707107}}}
});
fft_c64x2x4x8_1d_ = LiteralUtil::CreateR3<complex64>({
{{{28.0, 0.0}, {-4.0, 9.656854}, {-4.0, 4.0}, {-4.0, 1.656854},
{-4.0, 0.0}, {-4.0, -1.656854}, {-4.0, -4.0}, {-4.0, -9.656854}},
{{0.0, 28.0}, {-9.656854, -4.0}, {-4.0, -4.0}, {-1.656854, -4.0},
{0.0, -4.0}, {1.656854, -4.0}, {4.0, -4.0}, {9.656854, -4.0}},
{{28.0, 28.0}, {5.656854, 13.656854}, {0.0, 8.0}, {-2.343146, 5.656854},
{-4.0, 4.0}, {-5.656854, 2.343146}, {-8.0, -0.0}, {-13.656854, -5.656854}},
{{28.0, 28.0}, {-5.656854, -13.656854}, {-0.0, -8.0}, {2.343146, -5.656854},
{4.0, -4.0}, {5.656854, -2.343146}, {8.0, 0.0}, {13.656854, 5.656854}}},
{{{0.0, 0.0}, {-5.0, 12.071068}, {-4.0, 4.0}, {-5.0, 2.071068},
{-4.0, 0.0}, {-5.0, -2.071068}, {-4.0, -4.0}, {-5.0, -12.071068}},
{{0.0, 0.0}, {-12.071068, -5.0}, {-4.0, -4.0}, {-2.071068, -5.0},
{0.0, -4.0}, {2.071068, -5.0}, {4.0, -4.0}, {12.071068, -5.0}},
{{0.0, 7.0}, {1.0, 6.0}, {2.0, 5.0}, {3.0, 4.0},
{4.0, 3.0}, {5.0, 2.0}, {6.0, 1.0}, {7.0, 0.0}},
{{7.0, 0.0}, {6.0, 1.0}, {5.0, 2.0}, {4.0, 3.0},
{3.0, 4.0}, {2.0, 5.0}, {1.0, 6.0}, {0.0, 7.0}}}
});
fft_c64x2x4x8_2d_ = LiteralUtil::CreateR3<complex64>({
{{{84.0, 84.0}, {-13.656854, 5.656854}, {-8.0, 0.0}, {-5.656854, -2.343146},
{-4.0, -4.0}, {-2.343146, -5.656854}, {0.0, -8.0}, {5.656854, -13.656854}},
{{0.0, 0.0}, {0.0, -0.0}, {0.0, 0.0}, {0.0, 0.0},
{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{28.0, -28.0}, {16.970562, 40.970562}, {0.0, 24.0}, {-7.029438, 16.970562},
{-12.0, 12.0}, {-16.970562, 7.029438}, {-24.0, 0.0}, {-40.970562, -16.970562}},
{{0.0, -56.0}, {-19.313708, -8.0}, {-8.0, -8.0}, {-3.313708, -8.0},
{0.0, -8.0}, {3.313708, -8.0}, {8.0, -8.0}, {19.313708, -8.0}}},
{{{7.0, 7.0}, {-10.071068, 14.071068}, {-1.0, 7.0}, {-0.071068, 4.071068},
{3.0, 3.0}, {4.071068, -0.071068}, {7.0, -1.0}, {14.071068, -10.071068}},
{{0.0, 0.0}, {-12.0, 24.142136}, {-12.0, 8.0}, {-16.0, 4.142136},
{-16.0, 0.0}, {-20.0, -4.142136}, {-20.0, -8.0}, {-24.0, -24.142136}},
{{-7.0, 7.0}, {2.071068, 22.071068}, {-3.0, 11.0}, {-3.928932, 8.071068},
{-3.0, 3.0}, {-4.071068, -0.071068}, {-3.0, -5.0}, {-10.071068, -14.071068}},
{{0.0, -14.0}, {0.0, -12.0}, {0.0, -10.0}, {0.0, -8.0},
{0.0, -6.0}, {0.0, -4.0}, {0.0, -2.0}, {0.0, 0.0}}}
});
fft_c64x2x4x8_3d_ = LiteralUtil::CreateR3<complex64>({
{{{91.0, 91.0}, {-23.727922, 19.727922}, {-9.0, 7.0}, {-5.727922, 1.727922},
{-1.0, -1.0}, {1.727922, -5.727922}, {7.0, -9}, {19.727922, -23.727922}},
{{0.0, 0.0}, {-12.0, 24.142136}, {-12.0, 8.0}, {-16.0, 4.142136},
{-16.0, 0.0}, {-20.0, -4.142136}, {-20.0, -8.0}, {-24.0, -24.142136}},
{{21.0, -21.0}, {19.041630, 63.041630}, {-3.0, 35.0}, {-10.958370, 25.041630},
{-15.0, 15.0}, {-21.041630, 6.958370}, {-27.0, -5.0}, {-51.041630, -31.041630}},
{{0.0, -70.0}, {-19.313708, -20.0}, {-8.0, -18.0}, {-3.313708, -16.0},
{0.0, -14.0}, {3.313708, -12.0}, {8.0, -10.0}, {19.313708, -8.0}}},
{{{77.0, 77.0}, {-3.585786, -8.414214}, {-7.0, -7.0}, {-5.585786, -6.414214},
{-7.0, -7.0}, {-6.414214, -5.585786}, {-7.0, -7.0}, {-8.414214, -3.585786}},
{{0.0, 0.0}, {12.0, -24.142136}, {12.0, -8.0}, {16.0, -4.142136},
{16.0, 0.0}, {20.0, 4.142136}, {20.0, 8.0}, {24.0, 24.142136}},
{{35.0, -35.0}, {14.899494, 18.899494}, {3.0, 13.0}, {-3.100506, 8.899494},
{-9.0, 9.0}, {-12.899494, 7.100506}, {-21.0, 5.0}, {-30.899494, -2.899494}},
{{0.0, -42.0}, {-19.313708, 4.0}, {-8.0, 2.0}, {-3.313708, 0.0},
{0.0, -2.0}, {3.313708, -4.0}, {8.0, -6.0}, {19.313708, -8.0}}}
});
}
TEST_F(HloEvaluatorTest, 1D_FFT_4_on_c64x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[4] parameter(0)
ROOT fft = c64[4] fft(operand), fft_type=FFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0}, {4.0, 0.0}});
auto expected = LiteralUtil::CreateR1<complex64>(
{{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}, {-2.0, -2.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
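// IFFT is the inverse transform, scaled by 1/N; running the FFT output from
// the previous test back through it recovers the original sequence.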
TEST_F(HloEvaluatorTest, 1D_IFFT_4_on_c64x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[4] parameter(0)
ROOT ifft = c64[4] fft(operand), fft_type=IFFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}, {-2.0, -2.0}});
auto expected = LiteralUtil::CreateR1<complex64>(
{{1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0}, {4.0, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
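// RFFT maps N real samples to N/2+1 complex bins (the non-negative
// frequencies); the remaining bins are implied by conjugate symmetry.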
TEST_F(HloEvaluatorTest, 1D_RFFT_4_on_f32x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[4] parameter(0)
ROOT rfft = c64[3] fft(operand), fft_type=RFFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
auto expected =
LiteralUtil::CreateR1<complex64>({{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
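// IRFFT inverts RFFT, turning N/2+1 complex bins back into N real samples.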
TEST_F(HloEvaluatorTest, 1D_IRFFT_4_on_c64x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3] parameter(0)
ROOT irfft = f32[4] fft(operand), fft_type=IRFFT, fft_length={4}
}
)";
auto input =
LiteralUtil::CreateR1<complex64>({{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}});
auto expected = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_FFT_8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT fft = c64[2, 4, 8] fft(operand), fft_type=FFT, fft_length={8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_1d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_1d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IFFT_8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT ifft = c64[2, 4, 8] fft(operand), fft_type=IFFT, fft_length={8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_1d_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_RFFT_8_on_f32x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[8] parameter(0)
ROOT rfft = c64[5] fft(operand), fft_type=RFFT, fft_length={8}
}
)";
auto input =
LiteralUtil::CreateR1<float>({1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1});
auto expected = LiteralUtil::CreateR1<complex64>({{39.6, 0.0},
{-3.6, 8.691169},
{-3.6, 3.6},
{-3.6, 1.491169},
{-3.6, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IRFFT_8_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT irfft = f32[8] fft(operand), fft_type=IRFFT, fft_length={8}
}
)";
auto input = LiteralUtil::CreateR1<complex64>({{39.6, 0.0},
{-3.6, 8.691169},
{-3.6, 3.6},
{-3.6, 1.491169},
{-3.6, 0.0}});
auto expected =
LiteralUtil::CreateR1<float>({1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
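// Odd transform length: for N = 9, RFFT still produces N/2+1 = 5 bins.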
TEST_F(HloEvaluatorTest, 1D_RFFT_9_on_f32x9) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[9] parameter(0)
ROOT rfft = c64[5] fft(operand), fft_type=RFFT, fft_length={9}
}
)";
auto input = LiteralUtil::CreateR1<float>(
{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1, 9.9});
auto expected = LiteralUtil::CreateR1<complex64>({{49.5, 0.0},
{-3.360560, 11.705792},
{-3.893717, 5.712929},
{-4.5, 3.117691},
{-4.895723, 1.021942}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IRFFT_9_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT irfft = f32[9] fft(operand), fft_type=IRFFT, fft_length={9}
}
)";
auto input = LiteralUtil::CreateR1<complex64>({{49.5, 0.0},
{-3.360560, 11.705792},
{-3.893717, 5.712929},
{-4.5, 3.117691},
{-4.895723, 1.021942}});
auto expected = LiteralUtil::CreateR1<float>(
{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1, 9.9});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
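// 2D transforms: the FFT is applied over the last two dimensions, with any
// leading dimensions treated as batch.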
TEST_F(HloEvaluatorTest, 2D_FFT_4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT fft = c64[2, 4, 8] fft(operand), fft_type=FFT, fft_length={4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_2d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_2d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IFFT_4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT ifft = c64[2, 4, 8] fft(operand), fft_type=IFFT, fft_length={4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_2d_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_RFFT_3x8_on_f32x3x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 8] parameter(0)
ROOT rfft = c64[3, 5] fft(operand), fft_type=RFFT, fft_length={3, 8}
}
)";
auto input =
LiteralUtil::CreateR2<float>({{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1},
{8.1, 7.2, 6.3, 5.4, 4.5, 3.6, 2.7, 1.8},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8}});
auto expected = LiteralUtil::CreateR2<complex64>({{{118.8, 0.0},
{-4.4, 10.622540},
{-4.4, 4.4},
{-4.4, 1.822540},
{-4.4, 0.0}},
{{0.0, 0.0},
{-19.926162, 0.797280},
{-10.128203, -3.728203},
{-6.069756, -5.602720},
{-3.2, -6.928203}},
{{0.0, 0.0},
{13.526162, 14.653687},
{3.728203, 10.128203},
{-0.330244, 8.253687},
{-3.2, 6.928203}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IRFFT_3x8_on_c64x3x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 5] parameter(0)
ROOT irfft = f32[3, 8] fft(operand), fft_type=IRFFT, fft_length={3, 8}
}
)";
auto input = LiteralUtil::CreateR2<complex64>({{{118.8, 0.0},
{-4.4, 10.622540},
{-4.4, 4.4},
{-4.4, 1.822540},
{-4.4, 0.0}},
{{0.0, 0.0},
{-19.926162, 0.797280},
{-10.128203, -3.728203},
{-6.069756, -5.602720},
{-3.2, -6.928203}},
{{0.0, 0.0},
{13.526162, 14.653687},
{3.728203, 10.128203},
{-0.330244, 8.253687},
{-3.2, 6.928203}}});
auto expected =
LiteralUtil::CreateR2<float>({{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1},
{8.1, 7.2, 6.3, 5.4, 4.5, 3.6, 2.7, 1.8},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_RFFT_3x9_on_f32x3x9) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 9] parameter(0)
ROOT rfft = c64[3, 5] fft(operand), fft_type=RFFT, fft_length={3, 9}
}
)";
auto input = LiteralUtil::CreateR2<float>(
{{1.9, 2.8, 3.7, 4.6, 5.5, 6.4, 7.3, 8.2, 9.1},
{9.1, 8.2, 7.3, 6.4, 5.5, 4.6, 3.7, 2.8, 1.9},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9}});
auto expected = LiteralUtil::CreateR2<complex64>({{{148.5, 0.0},
{-4.95, 13.600013},
{-4.95, 5.899180},
{-4.95, 2.857884},
{-4.95, 0.872819}},
{{0.0, 0.0},
{-25.014467, 2.096690},
{-12.888800, -3.503916},
{-8.1, -5.715768},
{-4.974333, -7.159452}},
{{0.0, 0.0},
{17.814467, 17.685147},
{5.688800, 12.084542},
{0.9, 9.872690},
{-2.225667, 8.429006}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IRFFT_3x9_on_c64x3x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 5] parameter(0)
ROOT irfft = f32[3, 9] fft(operand), fft_type=IRFFT, fft_length={3, 9}
}
)";
auto input = LiteralUtil::CreateR2<complex64>({{{148.5, 0.0},
{-4.95, 13.600013},
{-4.95, 5.899180},
{-4.95, 2.857884},
{-4.95, 0.872819}},
{{0.0, 0.0},
{-25.014467, 2.096690},
{-12.888800, -3.503916},
{-8.1, -5.715768},
{-4.974333, -7.159452}},
{{0.0, 0.0},
{17.814467, 17.685147},
{5.688800, 12.084542},
{0.9, 9.872690},
{-2.225667, 8.429006}}});
auto expected = LiteralUtil::CreateR2<float>(
{{1.9, 2.8, 3.7, 4.6, 5.5, 6.4, 7.3, 8.2, 9.1},
{9.1, 8.2, 7.3, 6.4, 5.5, 4.6, 3.7, 2.8, 1.9},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
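// 3D transforms over all three dimensions of the c64[2, 4, 8] fixture.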
TEST_F(HloEvaluatorTest, 3D_FFT_2x4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT fft = c64[2, 4, 8] fft(operand), fft_type=FFT, fft_length={2, 4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_3d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_3d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IFFT_2x4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT ifft = c64[2, 4, 8] fft(operand), fft_type=IFFT, fft_length={2, 4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_3d_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_RFFT_3x3x4_on_f32x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 3, 4] parameter(0)
ROOT rfft = c64[3, 3, 3] fft(operand), fft_type=RFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<float>(
{{{1.8, 2.7, 3.6, 4.5}, {8.1, 7.2, 6.3, 5.4}, {1.1, 2.2, 3.3, 4.4}},
{{5.4, 6.3, 7.2, 8.1}, {4.5, 3.6, 2.7, 1.8}, {5.5, 6.6, 7.7, 8.8}},
{{-1.8, -2.7, -3.6, -4.5},
{-5.4, -6.3, -7.2, -8.1},
{1.9, 2.9, 3.9, 4.9}}});
auto expected = LiteralUtil::CreateR3<complex64>(
{{{{92.8, 0.0}, {-2.8, 2.8}, {-2.8, 0.0}},
{{-5.9, 35.160631}, {-11.519100, -8.919100}, {-1.3, -10.219100}},
{{-5.9, -35.160631}, {8.919100, 11.519100}, {-1.3, 10.219100}}},
{{{29.5, -81.579593}, {1.390897, 5.190897}, {-1.9, 3.290897}},
{{-25.1, -49.017038}, {1.044486, 4.844486}, {-1.9, 2.944486}},
{{11.8, 27.712813}, {1.517691, 4.717691}, {-1.6, 3.117691}}},
{{{29.5, 81.579593}, {-5.190897, -1.390897}, {-1.9, -3.290897}},
{{11.8, -27.712813}, {-4.717691, -1.517691}, {-1.6, -3.117691}},
{{-25.1, 49.017038}, {-4.844486, -1.044486}, {-1.9, -2.944486}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IRFFT_3x3x4_on_c64x3x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 3] parameter(0)
ROOT irfft = f32[3, 3, 4] fft(operand), fft_type=IRFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{92.8, 0.0}, {-2.8, 2.8}, {-2.8, 0.0}},
{{-5.9, 35.160631}, {-11.519100, -8.919100}, {-1.3, -10.219100}},
{{-5.9, -35.160631}, {8.919100, 11.519100}, {-1.3, 10.219100}}},
{{{29.5, -81.579593}, {1.390897, 5.190897}, {-1.9, 3.290897}},
{{-25.1, -49.017038}, {1.044486, 4.844486}, {-1.9, 2.944486}},
{{11.8, 27.712813}, {1.517691, 4.717691}, {-1.6, 3.117691}}},
{{{29.5, 81.579593}, {-5.190897, -1.390897}, {-1.9, -3.290897}},
{{11.8, -27.712813}, {-4.717691, -1.517691}, {-1.6, -3.117691}},
{{-25.1, 49.017038}, {-4.844486, -1.044486}, {-1.9, -2.944486}}}});
auto expected = LiteralUtil::CreateR3<float>(
{{{1.8, 2.7, 3.6, 4.5}, {8.1, 7.2, 6.3, 5.4}, {1.1, 2.2, 3.3, 4.4}},
{{5.4, 6.3, 7.2, 8.1}, {4.5, 3.6, 2.7, 1.8}, {5.5, 6.6, 7.7, 8.8}},
{{-1.8, -2.7, -3.6, -4.5},
{-5.4, -6.3, -7.2, -8.1},
{1.9, 2.9, 3.9, 4.9}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_RFFT_3x3x5_on_f32x3x3x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 3, 5] parameter(0)
ROOT rfft = c64[3, 3, 3] fft(operand), fft_type=RFFT, fft_length={3, 3, 5}
}
)";
auto input = LiteralUtil::CreateR3<float>({{{1.8, 2.7, 3.6, 4.5, 5.4},
{8.1, 7.2, 6.3, 5.4, 4.5},
{1.1, 2.2, 3.3, 4.4, 5.5}},
{{5.4, 6.3, 7.2, 8.1, 9.0},
{4.5, 3.6, 2.7, 1.8, 0.9},
{5.5, 6.6, 7.7, 8.8, 9.9}},
{{-1.8, -2.7, -3.6, -4.5, -5.4},
{-5.4, -6.3, -7.2, -8.1, -9.0},
{1.9, 2.9, 3.9, 4.9, 5.9}}});
auto expected = LiteralUtil::CreateR3<complex64>(
{{{{119.5, 0.0}, {-3.5, 4.817337}, {-3.5, 1.137219}},
{{-5.75, 56.724664}, {-19.206730, -10.537254}, {-5.775483, -12.245880}},
{{-5.75, -56.724664}, {15.956730, 15.010495}, {2.525483, 13.301869}}},
{{{39.25, -106.088112}, {3.286913, 7.382528}, {-1.038404, 4.885305}},
{{-29.0, -64.951905}, {2.690922, 6.949515}, {-1.179098, 4.452292}},
{{16.75, 30.743902}, {3.363918, 6.649878}, {-0.733751, 4.546954}}},
{{{39.25, 106.088112}, {-8.036913, -0.844714}, {-3.711596, -3.341936}},
{{16.75, -30.743902}, {-7.363918, -1.144350}, {-3.266249, -3.247275}},
{{-29.0, 64.951905}, {-7.440922, -0.411701}, {-3.570902, -2.908924}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IRFFT_3x3x5_on_c64x3x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 3] parameter(0)
ROOT irfft = f32[3, 3, 5] fft(operand), fft_type=IRFFT, fft_length={3, 3, 5}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{119.5, 0.0}, {-3.5, 4.817337}, {-3.5, 1.137219}},
{{-5.75, 56.724664}, {-19.206730, -10.537254}, {-5.775483, -12.245880}},
{{-5.75, -56.724664}, {15.956730, 15.010495}, {2.525483, 13.301869}}},
{{{39.25, -106.088112}, {3.286913, 7.382528}, {-1.038404, 4.885305}},
{{-29.0, -64.951905}, {2.690922, 6.949515}, {-1.179098, 4.452292}},
{{16.75, 30.743902}, {3.363918, 6.649878}, {-0.733751, 4.546954}}},
{{{39.25, 106.088112}, {-8.036913, -0.844714}, {-3.711596, -3.341936}},
{{16.75, -30.743902}, {-7.363918, -1.144350}, {-3.266249, -3.247275}},
{{-29.0, 64.951905}, {-7.440922, -0.411701}, {-3.570902, -2.908924}}}});
auto expected = LiteralUtil::CreateR3<float>({{{1.8, 2.7, 3.6, 4.5, 5.4},
{8.1, 7.2, 6.3, 5.4, 4.5},
{1.1, 2.2, 3.3, 4.4, 5.5}},
{{5.4, 6.3, 7.2, 8.1, 9.0},
{4.5, 3.6, 2.7, 1.8, 0.9},
{5.5, 6.6, 7.7, 8.8, 9.9}},
{{-1.8, -2.7, -3.6, -4.5, -5.4},
{-5.4, -6.3, -7.2, -8.1, -9.0},
{1.9, 2.9, 3.9, 4.9, 5.9}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
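// Layout tests: results must not depend on the physical layouts assigned to
// the operand and the result.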
TEST_F(HloEvaluatorTest, 1D_FFT_8_on_c64x2x4x8_with_layout) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8]{0, 2, 1} parameter(0)
ROOT fft = c64[2, 4, 8]{1, 2, 0} fft(operand), fft_type=FFT, fft_length={8}
}
)";
auto input = fft_c64x2x4x8_.Relayout(LayoutUtil::MakeLayout({0, 2, 1}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_1d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_1d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_FFT_4x8_on_c64x2x4x8_with_layout) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8]{2, 0, 1} parameter(0)
ROOT fft = c64[2, 4, 8]{1, 0, 2} fft(operand), fft_type=FFT, fft_length={4, 8}
}
)";
auto input = fft_c64x2x4x8_.Relayout(LayoutUtil::MakeLayout({2, 0, 1}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_2d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_2d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_2x4x8_on_c64x2x4x8_with_layout) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8]{1, 2, 0} parameter(0)
ROOT fft =
c64[2, 4, 8]{0, 2, 1} fft(operand), fft_type=FFT, fft_length={2, 4, 8}
}
)";
auto input = fft_c64x2x4x8_.Relayout(LayoutUtil::MakeLayout({1, 2, 0}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_3d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_3d_, result, fft_error_));
}
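// Degenerate shapes: a zero in fft_length produces an all-zero result, and
// zero-sized operands pass through unchanged.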
TEST_F(HloEvaluatorTest, 1D_FFT_0_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={0}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
auto expected = LiteralUtil::CreateR4<complex64>({{{{{0.0, 0.0}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_FFT_1_on_c64x1x1x1x0) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 0] parameter(0)
ROOT fft = c64[1, 1, 1, 0] fft(operand), fft_type=FFT, fft_length={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto input,
LiteralUtil::CreateR4<complex64>({{{{}}}}).Reshape({1, 1, 1, 0}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_FFT_1_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_1x0x1_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={1, 0, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
auto expected = LiteralUtil::CreateR4<complex64>({{{{{0.0, 0.0}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_1x1x1_on_c64x0x1x0x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[0, 1, 0, 1] parameter(0)
ROOT fft = c64[0, 1, 0, 1] fft(operand), fft_type=FFT, fft_length={1, 1, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto input,
LiteralUtil::CreateR4<complex64>({{{{}}}}).Reshape({0, 1, 0, 1}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_1x1x1_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={1, 1, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_3x1x1_on_c64x1x3x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 3, 1, 1] parameter(0)
ROOT fft = c64[1, 3, 1, 1] fft(operand), fft_type=FFT, fft_length={3, 1, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>(
{{{{{42.24, 24.42}}}, {{{-42.24, 24.42}}}, {{{42.24, -24.42}}}}});
auto expected =
LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}},
{{{84.5367, 97.5818}}},
{{{-0.0566792, -48.7418}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IFFT_3x1x1_on_c64x1x3x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 3, 1, 1] parameter(0)
ROOT ifft = c64[1, 3, 1, 1] fft(operand), fft_type=IFFT, fft_length={3, 1, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}},
{{{84.5367, 97.5818}}},
{{{-0.0566792, -48.7418}}}}});
auto expected = LiteralUtil::CreateR4<complex64>(
{{{{{42.24, 24.42}}}, {{{-42.24, 24.42}}}, {{{42.24, -24.42}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
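// FFT/IFFT round trip at a prime transform length (N = 5).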
TEST_F(HloEvaluatorTest, 1D_FFT_5_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT fft = c64[5] fft(operand), fft_type=FFT, fft_length={5}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{1.0, 5.0}, {2.0, 4.0}, {3.0, 3.0}, {4.0, 2.0}, {5.0, 1.0}});
auto expected = LiteralUtil::CreateR1<complex64>({{15.0, 15.0},
{0.940955, 5.94095},
{-1.6877, 3.3123},
{-3.3123, 1.6877},
{-5.94095, -0.940955}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IFFT_5_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT ifft = c64[5] fft(operand), fft_type=IFFT, fft_length={5}
}
)";
auto input = LiteralUtil::CreateR1<complex64>({{15.0, 15.0},
{0.940955, 5.94095},
{-1.6877, 3.3123},
{-3.3123, 1.6877},
{-5.94095, -0.940955}});
auto expected = LiteralUtil::CreateR1<complex64>(
{{1.0, 5.0}, {2.0, 4.0}, {3.0, 3.0}, {4.0, 2.0}, {5.0, 1.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
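// All-zero inputs must transform to all zeros.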
TEST_F(HloEvaluatorTest, 1D_FFT_4_on_zero_c64x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[4] parameter(0)
ROOT fft = c64[4] fft(operand), fft_type=FFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_3x3x4_on_zero_c64x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 4] parameter(0)
ROOT fft = c64[3, 3, 4] fft(operand), fft_type=FFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IFFT_3x3x4_on_zero_c64x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 4] parameter(0)
ROOT ifft = c64[3, 3, 4] fft(operand), fft_type=IFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_RFFT_3x3x4_on_zero_f32x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 3, 4] parameter(0)
ROOT rfft = c64[3, 3, 3] fft(operand), fft_type=RFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<float>(
{{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}}});
auto expected = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IRFFT_3x3x4_on_zero_c64x3x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 3] parameter(0)
ROOT irfft = f32[3, 3, 4] fft(operand), fft_type=IRFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
auto expected = LiteralUtil::CreateR3<float>(
{{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IRFFT_3x4_on_c64x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3] parameter(0)
ROOT irfft = f32[3, 4] fft(operand), fft_type=IRFFT, fft_length={3, 4}
}
)";
auto input =
LiteralUtil::CreateR2<complex64>({{{0.0, 0.0}, {1.0, 0.0}, {2.0, 0.0}},
{{3.0, 0.0}, {4.0, 0.0}, {5.0, 0.0}},
{{6.0, 0.0}, {7.0, 0.0}, {8.0, 0.0}}});
auto expected =
LiteralUtil::CreateR2<float>({{4.0, -0.5, 0.0, -0.5},
{-1.5, 0.433013, 0.0, -0.433013},
{-1.5, -0.433013, 0.0, 0.433013}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
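// Naive f32 accumulation of 2^25 ones would stall at 2^24 (adding 1.0f to
// 16777216.0f is a no-op in float), so recovering kNumElements exactly shows
// the evaluator reduces with extra intermediate precision.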
class HloEvaluatorPreciseReduceTest : public HloTestBase {};
TEST_F(HloEvaluatorPreciseReduceTest, AddReductionPrecisionTest) {
auto m = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
constexpr int kNumElements = 1 << 25;
std::vector<float> v(kNumElements, 1.0f);
HloInstruction* arg_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(v)));
HloInstruction* init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m->AddEmbeddedComputation(add_computation.Build());
HloInstruction* reduce_instruction = b.AddInstruction(
HloInstruction::CreateReduce(scalar_shape, arg_instruction, init_value,
                                   /*dimensions_to_reduce=*/{0}, add_func));
m->AddEntryComputation(b.Build());
HloEvaluator hlo_eval;
Literal result = hlo_eval.Evaluate(reduce_instruction).value();
LiteralTestUtil::ExpectR0Equal<float>(kNumElements, result);
}
void BM_ReducePrecisely(::testing::benchmark::State& state) {
HloComputation::Builder b("BM_ReducePrecisely");
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
HloModule module("BM_ReducePrecisely", config);
constexpr int kNumElements = 1 << 25;
std::vector<float> v(kNumElements, 1.0f);
HloInstruction* arg_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(v)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = module.AddEmbeddedComputation(add_computation.Build());
HloInstruction* reduce_instruction = b.AddInstruction(
HloInstruction::CreateReduce(scalar_shape, arg_instruction, init_value,
                                   /*dimensions_to_reduce=*/{0}, add_func));
module.AddEntryComputation(b.Build());
for (auto s : state) {
HloEvaluator hlo_eval;
hlo_eval.Evaluate(reduce_instruction).value();
}
}
BENCHMARK(BM_ReducePrecisely);
TEST_P(HloEvaluatorBf16Test, ReduceAdd) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m_->AddEmbeddedComputation(add_computation.Build());
Shape shape = ShapeUtil::MakeShape(F32, {2});
b.AddInstruction(
HloInstruction::CreateReduce(shape, arg_instruction, init_value,
                                   /*dimensions_to_reduce=*/{1}, add_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR1<float>({6, 18});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMax) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
auto max_func = m_->AddEmbeddedComputation(MaxComputationScalarF32());
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
Shape shape = ShapeUtil::MakeShape(F32, {1, 2});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, max_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{6, 7}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
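// The ReduceWindowMaxIotaTest helper (defined with the fixture, not shown
// here) runs a max reduce-window over what is, judging by the expected
// outputs, a 4x4 iota input holding 0..15; its arguments are annotated
// inline below.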
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaWindowDilation) {
auto expected = LiteralUtil::CreateR2<float>({{10, 11}, {14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/1,
      /*window_dilation=*/2,
      /*base_dilation=*/1,
      /*expected=*/expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaStrideWindowDilation) {
auto expected = LiteralUtil::CreateR2<float>({{10}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/2,
      /*window_dilation=*/2,
      /*base_dilation=*/1,
      /*expected=*/expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaBaseDilation) {
auto expected = LiteralUtil::CreateR2<float>({{0, 1, 1, 2, 2, 3},
{4, 5, 5, 6, 6, 7},
{4, 5, 5, 6, 6, 7},
{8, 9, 9, 10, 10, 11},
{8, 9, 9, 10, 10, 11},
{12, 13, 13, 14, 14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/1,
      /*window_dilation=*/1,
      /*base_dilation=*/2,
      /*expected=*/expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaStrideBaseDilation) {
auto expected =
LiteralUtil::CreateR2<float>({{0, 1, 2}, {4, 5, 6}, {8, 9, 10}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/2,
      /*window_dilation=*/1,
      /*base_dilation=*/2,
      /*expected=*/expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaStrideBothDilation) {
auto expected =
LiteralUtil::CreateR2<float>({{5, 6, 7}, {9, 10, 11}, {13, 14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/2,
      /*window_dilation=*/2,
      /*base_dilation=*/2,
      /*expected=*/expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaPaddingStrideBaseDilation) {
auto expected =
LiteralUtil::CreateR2<float>({{0, 2, 3}, {8, 10, 11}, {12, 14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/3,
      /*padding=*/1,
      /*stride=*/3,
      /*window_dilation=*/1,
      /*base_dilation=*/2,
      /*expected=*/expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowAdd) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m_->AddEmbeddedComputation(add_computation.Build());
Window window;
WindowDimension dim;
dim.set_size(1);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(1);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, add_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{1, 3, 5}, {5, 11, 13}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowAdd6D) {
HloComputation::Builder b(TestName());
std::vector<int64_t> input_dims(6, 4);
Literal arg_literal =
LiteralUtil::CreateFullWithDescendingLayout<float>(input_dims, 1.0f);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m_->AddEmbeddedComputation(add_computation.Build());
Window window;
WindowDimension trivial_dim;
trivial_dim.set_size(1);
trivial_dim.set_stride(1);
trivial_dim.set_padding_low(0);
trivial_dim.set_padding_high(0);
trivial_dim.set_window_dilation(1);
trivial_dim.set_base_dilation(1);
WindowDimension active_dim;
active_dim.set_size(2);
active_dim.set_stride(1);
active_dim.set_padding_low(0);
active_dim.set_padding_high(0);
active_dim.set_window_dilation(1);
active_dim.set_base_dilation(1);
*window.add_dimensions() = trivial_dim;
*window.add_dimensions() = active_dim;
*window.add_dimensions() = active_dim;
*window.add_dimensions() = active_dim;
*window.add_dimensions() = trivial_dim;
*window.add_dimensions() = trivial_dim;
Shape shape = ShapeUtil::MakeShape(F32, {4, 3, 3, 3, 4, 4});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, add_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
std::vector<int64_t> output_dims = {4, 3, 3, 3, 4, 4};
Literal result_literal =
LiteralUtil::CreateFullWithDescendingLayout<float>(output_dims, 8.0f);
EXPECT_TRUE(LiteralTestUtil::Equal(result_literal, result));
}
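// Variadic reduce-window: two inputs reduced in lockstep by a
// tuple-returning computation, here an element-wise pairwise minimum.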
TEST_P(HloEvaluatorBf16Test, Min3In5Stride2Tuple) {
HloComputation::Builder builder("main");
auto input1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1})));
auto input2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1})));
HloComputation::Builder bcompute("ComputeFunction");
auto shape1 = ShapeUtil::MakeShape(F32, {});
auto shape2 = ShapeUtil::MakeShape(F32, {});
auto p2 =
bcompute.AddInstruction(HloInstruction::CreateParameter(0, shape1, "x0"));
auto p3 =
bcompute.AddInstruction(HloInstruction::CreateParameter(1, shape2, "x1"));
auto p4 =
bcompute.AddInstruction(HloInstruction::CreateParameter(2, shape1, "y0"));
auto p5 =
bcompute.AddInstruction(HloInstruction::CreateParameter(3, shape2, "y1"));
std::vector<HloInstruction*> compute_vec = {
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kMinimum, p2, p4)),
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kMinimum, p3, p5))};
bcompute.AddInstruction(HloInstruction::CreateTuple(compute_vec));
auto compute_tuple = m_->AddEmbeddedComputation(bcompute.Build());
std::vector<HloInstruction*> input_vec = {input1, input2};
auto init1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(F32)));
auto init2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(F32)));
std::vector<HloInstruction*> init_vec = {init1, init2};
auto padding = std::pair<int64_t, int64_t>(0, 0);
TF_ASSERT_OK_AND_ASSIGN(auto window,
ShapeInference::InferWindowFromDimensions(
{3}, {2}, absl::MakeSpan(&padding, 1),
                              /*lhs_dilation=*/{},
                              /*rhs_dilation=*/{}));
std::vector<const Shape*> input_shapes = {&input1->shape(), &input2->shape()};
std::vector<const Shape*> init_shapes = {&init1->shape(), &init2->shape()};
TF_ASSERT_OK_AND_ASSIGN(Shape shape,
ShapeInference::InferReduceWindowShape(
input_shapes, init_shapes, window,
compute_tuple->ComputeProgramShape()));
builder.AddInstruction(HloInstruction::CreateReduceWindow(
shape, input_vec, init_vec, window, compute_tuple));
auto r1 = LiteralUtil::CreateR1<float>({100, 1});
auto expected = LiteralUtil::MakeTuple({&r1, &r1});
m_->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Min3In5Stride2TupleDiffInput) {
HloComputation::Builder builder("main");
auto input1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1})));
auto input2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int>({15, 28, 300, 107, 12})));
HloComputation::Builder bcompute("ComputeFunction");
auto shape1 = ShapeUtil::MakeShape(F32, {});
auto shape2 = ShapeUtil::MakeShape(S32, {});
auto p2 =
bcompute.AddInstruction(HloInstruction::CreateParameter(0, shape1, "x0"));
auto p3 =
bcompute.AddInstruction(HloInstruction::CreateParameter(1, shape2, "x1"));
auto p4 =
bcompute.AddInstruction(HloInstruction::CreateParameter(2, shape1, "y0"));
auto p5 =
bcompute.AddInstruction(HloInstruction::CreateParameter(3, shape2, "y1"));
std::vector<HloInstruction*> compute_vec = {
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kMinimum, p2, p4)),
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kMinimum, p3, p5))};
bcompute.AddInstruction(HloInstruction::CreateTuple(compute_vec));
auto compute_tuple = m_->AddEmbeddedComputation(bcompute.Build());
std::vector<HloInstruction*> input_vec = {input1, input2};
auto init1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(F32)));
auto init2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(S32)));
std::vector<HloInstruction*> init_vec = {init1, init2};
auto padding = std::pair<int64_t, int64_t>(0, 0);
TF_ASSERT_OK_AND_ASSIGN(auto window,
ShapeInference::InferWindowFromDimensions(
{3}, {2}, absl::MakeSpan(&padding, 1),
                              /*lhs_dilation=*/{},
                              /*rhs_dilation=*/{}));
std::vector<const Shape*> input_shapes = {&input1->shape(), &input2->shape()};
std::vector<const Shape*> init_shapes = {&init1->shape(), &init2->shape()};
TF_ASSERT_OK_AND_ASSIGN(Shape shape,
ShapeInference::InferReduceWindowShape(
input_shapes, init_shapes, window,
compute_tuple->ComputeProgramShape()));
builder.AddInstruction(HloInstruction::CreateReduceWindow(
shape, input_vec, init_vec, window, compute_tuple));
auto r1 = LiteralUtil::CreateR1<float>({100, 1});
auto r2 = LiteralUtil::CreateR1<int>({15, 12});
auto expected = LiteralUtil::MakeTuple({&r1, &r2});
m_->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
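// Array2D::FillUnique packs the row index into the high bits of each value,
// so this 3x5 operand appears to hold rows {1..5}, {9..13}, {17..21}; taking
// rows {0, 2} at column 2 then yields 3 and 19.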
TEST_P(HloEvaluatorBf16Test, StridedSlice) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<float>>(3, 5);
operand_array->FillUnique(1.0f);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 1});
  b.AddInstruction(HloInstruction::CreateSlice(shape, operand,
                                               /*start_indices=*/{0, 2},
                                               /*limit_indices=*/{3, 5},
                                               /*strides=*/{2, 3}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({
{3},
{19},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DynamicSlice) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<float>>(2, 4);
operand_array->FillUnique(1.0f);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto zero = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto one = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
b.AddInstruction(
      HloInstruction::CreateDynamicSlice(shape, operand, {zero, one},
                                         /*slice_sizes=*/{2, 3}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({
{2, 3, 4},
{6, 7, 8},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
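// A start index that would read out of bounds (2 in a dimension of size 2
// with slice size 2) is adjusted back into range, giving the same result as
// the in-bounds slice above.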
TEST_P(HloEvaluatorBf16Test, DynamicSliceModSlice) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<float>>(2, 4);
operand_array->FillUnique(1.0f);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto two = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
auto one = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
b.AddInstruction(
      HloInstruction::CreateDynamicSlice(shape, operand, {two, one},
                                         /*slice_sizes=*/{2, 3}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({
{2, 3, 4},
{6, 7, 8},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DynamicSliceUpdate) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto zero = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto one = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto update = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<double>({{-2.0, -3.0}, {-6.0, -7.0}})));
Shape shape = ShapeUtil::MakeShape(F64, {2, 3});
b.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
shape, operand, update, {zero, one}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<double>({
{1, -2, -3},
{5, -6, -7},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, SetAndGetTuples) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
auto operand_literal2 =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
HloInstruction* operand2 = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal2)));
HloInstruction* operand1 = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64_t>({0, 1})));
auto tuple =
b.AddInstruction(HloInstruction::CreateTuple({operand1, operand2}));
Shape shape = ShapeUtil::MakeShape(F64, {2, 3});
b.AddInstruction(HloInstruction::CreateGetTupleElement(shape, tuple, 1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<double>({
{1, 2, 3},
{5, 6, 7},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, SetAndGetNestedTuples) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
HloInstruction* operand2 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2FromArray2D<double>(*operand_array)));
HloInstruction* operand1 = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64_t>({0, 1})));
auto tuple1 =
b.AddInstruction(HloInstruction::CreateTuple({operand1, operand2}));
auto tuple2 =
b.AddInstruction(HloInstruction::CreateTuple({operand2, operand2}));
auto outer_tuple =
b.AddInstruction(HloInstruction::CreateTuple({tuple1, tuple2}));
b.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple2->shape(), outer_tuple, 1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto result_inner_literal =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
auto expected =
LiteralUtil::MakeTuple({&result_inner_literal, &result_inner_literal});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Reverse) {
HloComputation::Builder b(TestName());
Array4D<float> input({
{{{1.0f}, {2.0f}},
{{3.0f}, {4.0f}},
{{5.0f}, {6.0f}}},
{{{7.0f}, {8.0f}},
{{9.0f}, {10.0f}},
{{11.0f}, {12.0f}}},
{{{13.0f}, {14.0f}},
{{15.0f}, {16.0f}},
{{17.0f}, {18.0f}}},
{{{19.0f}, {20.0f}},
{{21.0f}, {22.0f}},
{{23.0f}, {24.0f}}},
});
auto operand_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
const Shape shape = ShapeUtil::MakeShape(F32, {4, 3, 2, 1});
b.AddInstruction(HloInstruction::CreateReverse(shape, operand, {0, 1}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR4FromArray4D<float>({
{{{23.0f}, {24.0f}},
{{21.0f}, {22.0f}},
{{19.0f}, {20.0f}}},
{{{17.0f}, {18.0f}},
{{15.0f}, {16.0f}},
{{13.0f}, {14.0f}}},
{{{11.0f}, {12.0f}},
{{9.0f}, {10.0f}},
{{7.0f}, {8.0f}}},
{{{5.0f}, {6.0f}},
{{3.0f}, {4.0f}},
{{1.0f}, {2.0f}}},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
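// EvaluateWithSubstitutions evaluates an instruction while treating chosen
// operands as fixed literals: `square` is overridden with {10, 20, 30, 40},
// so the add yields {11, 22, 33, 44} rather than param0 + param0 * param0.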
TEST_P(HloEvaluatorBf16Test, EvaluateWithSubstitutions) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* square = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, param0, param0));
HloInstruction* add = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, square));
HloEvaluator evaluator;
Literal param0_literal = LiteralUtil::CreateR1<float>({1, 2, 3, 4});
Literal square_literal = LiteralUtil::CreateR1<float>({10, 20, 30, 40});
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator.EvaluateWithSubstitutions(
          add, {{param0, &param0_literal}, {square, &square_literal}}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({11, 22, 33, 44}), result));
}
TEST_P(HloEvaluatorBf16Test, EvaluateWithSubstitutionsWithConstantOperand) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* square = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, param0, param0));
HloInstruction* constant = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
HloInstruction* add = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, constant, square));
HloEvaluator evaluator;
Literal square_literal = LiteralUtil::CreateR1<float>({10, 20, 30, 40});
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator.EvaluateWithSubstitutions(add, {{square, &square_literal}}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({11, 22, 33, 44}), result));
}
TEST_F(HloEvaluatorTest, EvaluateWithSubstitutionsLiteralBase) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(S64, {3});
HloInstruction* param0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* square = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, param0, param0));
int64_t int64_values[] = {1, 2, 3};
const Shape literal_shape = ShapeUtil::MakeShape(S64, {3});
BorrowingLiteral literal(reinterpret_cast<const char*>(int64_values),
literal_shape);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(Literal result, evaluator.EvaluateWithSubstitutions(
square, {{param0, &literal}}));
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int64_t>({1, 4, 9}),
result));
}
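// Gather tests: the dimension numbers below reproduce TensorFlow gather
// behaviors (V1 gathers rows, V2 gathers columns, Nd gathers points).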
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherV1) {
const char* hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {7, 8, 9}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherV2) {
const char* hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 3}, {4, 6}, {7, 9}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherMultipleBatchDims) {
const char* hlo_text = R"(
HloModule TensorFlowGatherMultipleBatchDims
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,3,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}, {2, 1}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR3<int32_t>(
{{{1, 3}, {4, 6}, {7, 9}}, {{3, 2}, {6, 5}, {9, 8}}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherNd) {
const char* hlo_text = R"(
HloModule TensorFlowGatherNd
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{-1, 1}, {-4, 4}}), result));
}
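// With index_vector_dim=0 the index vectors run down the columns of
// start_indices, so the gathered points are (0, 1) and (0, 0).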
TEST_F(HloEvaluatorTest,
EvaluateGather_TensorFlowGatherNdNonDefaultIndexVectorDim) {
const char* hlo_text = R"(
HloModule TensorFlowGatherNd
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{-2, 2}, {-1, 1}}), result));
}
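// A gather with no collapsed slice dimensions and a single index vector is
// equivalent to a dynamic-slice at the given start indices.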
TEST_F(HloEvaluatorTest, EvaluateGather_DynamicSlice) {
const char* hlo_text = R"(
HloModule DynamicSlice
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[1,1] gather(operand, indices),
offset_dims={0,1},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>({{5}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_BatchDynamicSlice) {
const char* hlo_text = R"(
HloModule BatchDynamicSlice
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,1,1] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{2, 1}, {1, 1}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR3<int32_t>({{{8}}, {{5}}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_ZeroDimBounds) {
const char* hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,0] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,0] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR2<int32_t>({{}, {}, {}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>({{}, {}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_NoOutputWindowDims) {
const std::string hlo_text = R"(
HloModule GatherXd
ENTRY main {
operand = s32[3] parameter(0)
indices = s32[2,2,1] parameter(1)
ROOT gather = s32[2,2] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2});
Literal start_indices =
LiteralUtil::CreateR3<int32_t>({{{0}, {1}}, {{2}, {1}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{0, 1}, {2, 1}}), result));
}
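// Scatter tests: a to_apply computation that returns its second parameter
// implements plain replacement; the add/mul variants combine colliding updates.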
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatterV1_Update) {
const char* hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {4, 5, 6}, {70, 80, 90}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatterV2_Update) {
const char* hlo_text = R"(
HloModule TensorFlowScatterV2
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[3,2] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={0},
inserted_window_dims={1},
scatter_dims_to_operand_dims={1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 30}, {40, 60}, {70, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{10, 2, 30}, {40, 5, 60}, {70, 8, 90}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_Add) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{11, 22, 33}, {4, 5, 6}, {77, 88, 99}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_Mul) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
mul_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT mul = s32[] multiply(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=mul_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>(
{{10, 40, 90}, {4, 5, 6}, {490, 640, 810}}),
result));
}
TEST_P(HloEvaluatorBf16Test, EvaluateScatter_TensorFlowScatter_F32) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
add_f32 (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(f32[] lhs, f32[] rhs)
}
ENTRY main {
operand = f32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = f32[2,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, updates),
to_apply=add_f32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR2<float>(
{{1.1, 2.2, 3.3}, {4.4, 5.5, 6.6}, {7.7, 8.8, 9.9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({2, 1});
Literal updates =
LiteralUtil::CreateR2<float>({{0.4, 1.1, 0.7}, {2.3, 3.1, 1.6}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>(
{{1.1, 2.2, 3.3}, {6.7, 8.6, 8.2}, {8.1, 9.9, 10.6}}),
result, ErrorSpec{0.1, 0.01}));
}
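// Both index vectors point at row 1, so its updates accumulate through add:
// {4,5,6} + {10,20,30} + {70,80,90} = {84,105,126}.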
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_RepeatedIndices) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {84, 105, 126}, {7, 8, 9}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_MultipleBatchDims) {
const char* hlo_text = R"(
HloModule TensorFlowScatterMultipleBatchDims
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,3,2] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={1},
scatter_dims_to_operand_dims={1},
index_vector_dim=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}, {2, 1}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10, 30}, {40, 60}, {70, 90}}, {{5, 5}, {5, 5}, {5, 5}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>(
{{11, 7, 38}, {44, 10, 71}, {77, 13, 104}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatterNd) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNd
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-40, 40}});
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{-10, 10}, {-2, 2}, {-3, 3}},
{{-40, 40}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest,
EvaluateScatter_TensorFlowScatterNd_NonDefaultIndexVectorDim) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNdNonDefaultIndexVectorDim
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-20, 20}});
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{-20, 20}, {-10, 10}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
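// A scatter whose update window covers every operand dimension (no inserted
// window dims) behaves like a dynamic-update-slice.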
TEST_F(HloEvaluatorTest, EvaluateScatter_DynamicUpdateSlice) {
const char* hlo_text = R"(
HloModule DynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[1,1] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={0,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
Literal updates = LiteralUtil::CreateR2<int32_t>({{10}});
Literal expected =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 10, 6}, {7, 8, 9}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_BatchDynamicUpdateSlice) {
const char* hlo_text = R"(
HloModule BatchDynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,1,1] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{2, 1}, {1, 1}});
Literal updates = LiteralUtil::CreateR3<int32_t>({{{10}}, {{20}}});
Literal expected =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 20, 6}, {7, 10, 9}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_ZeroDimBounds) {
const char* hlo_text = R"(
HloModule TensorFlowScatter_ZeroDimBounds
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,0] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,0] parameter(2)
ROOT scatter = s32[3,0] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR2<int32_t>({{}, {}, {}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates = LiteralUtil::CreateR2<int32_t>({{}, {}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(operand, result));
}
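// Scalar updates scattered into a 1-D operand; index 1 appears twice, so its
// updates (20 and 40) accumulate: {0,1,2} -> {10,61,32}.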
TEST_F(HloEvaluatorTest, EvaluateScatter_NoUpdateWindowDims) {
const std::string hlo_text = R"(
HloModule Scatter_NoUpdateWindowDims
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3] parameter(0)
indices = s32[2,2,1] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2});
Literal scatter_indices =
LiteralUtil::CreateR3<int32_t>({{{0}, {1}}, {{2}, {1}}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{10, 20}, {30, 40}});
Literal expected = LiteralUtil::CreateR1<int32_t>({10, 61, 32});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
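// Verifies that an update whose index is negative (out of bounds) is
// discarded; only the update for index 2 is applied.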
TEST_F(HloEvaluatorTest, EvaluateScatter_NegativeIndices) {
const char* hlo_text = R"(
HloModule TensorFlowScatter_NegativeIndices
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({-1, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {77, 88, 99}}),
EvaluateWithModule(module.get(),
{&operand, &scatter_indices, &updates})));
}
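// Verifies that updates whose start indices fall out of bounds are dropped
// rather than clamped or wrapped.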
TEST_F(HloEvaluatorTest, EvaluateScatter_OobIndices) {
const std::string hlo_text = R"(
HloModule BatchDynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3]{1,0} parameter(0)
indices = s32[6,2]{1,0} parameter(1)
updates = s32[6,1,1]{2,1,0} parameter(2)
ROOT scatter = s32[3,3]{1,0} scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>(
{{2, 7}, {2, 1}, {1, 1}, {5, 1}, {2147483647, 1}, {1, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10}}, {{20}}, {{30}}, {{40}}, {{50}}, {{60}}});
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 30, 60}, {7, 20, 9}}),
EvaluateWithModule(module.get(),
{&operand, &scatter_indices, &updates})));
}
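// Verifies that an update is discarded when its update window would extend
// past the operand bounds, leaving the operand unchanged.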
TEST_F(HloEvaluatorTest, EvaluateScatter_OobUpdateWindow) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNd_OobUpdateWindow
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[1,2] parameter(1)
updates = s32[1,2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>({{{-10, 10}, {-40, 40}}});
Literal expected = operand.Clone();
EXPECT_TRUE(LiteralTestUtil::Equal(
expected, EvaluateWithModule(module.get(),
{&operand, &scatter_indices, &updates})));
}
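// Variadic scatter: two operands and two update tensors are scattered in
// lockstep, with the to_apply computation returning a tuple of new values.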
TEST_F(HloEvaluatorTest, EvaluateScatter_Multioutput) {
const char* hlo_text = R"(
HloModule MultioutputScatter
update {
lhs0 = s32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = s32[] parameter(2)
rhs1 = f32[] parameter(3)
ROOT tuple = (s32[], f32[]) tuple(rhs0, rhs1)
}
ENTRY main {
operand0 = s32[3,3,2] parameter(0)
operand1 = f32[3,3,2] parameter(1)
indices = s32[2,2] parameter(2)
updates0 = s32[2,2] parameter(3)
updates1 = f32[2,2] parameter(4)
ROOT scatter = (s32[3,3,2], f32[3,3,2]) scatter(operand0, operand1, indices, updates0, updates1),
to_apply=update,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand0 =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal operand1 =
LiteralUtil::CreateR3<float>({{{-2, 2}, {-3, 3}, {-4, 4}},
{{-5, 5}, {-6, 6}, {-7, 7}},
{{-8, 8}, {-9, 9}, {-10, 10}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates0 = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-40, 40}});
Literal updates1 = LiteralUtil::CreateR2<float>({{-11, 11}, {-41, 41}});
Literal expected = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateR3<int32_t>({{{-10, 10}, {-2, 2}, {-3, 3}},
{{-40, 40}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}}),
LiteralUtil::CreateR3<float>({{{-11, 11}, {-3, 3}, {-4, 4}},
{{-41, 41}, {-6, 6}, {-7, 7}},
{{-8, 8}, {-9, 9}, {-10, 10}}}));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
Evaluate({&operand0, &operand1, &scatter_indices, &updates0, &updates1}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesCompareBF16) {
auto lhs = LiteralUtil::CreateR2<bfloat16>(
{{bfloat16(0.25), bfloat16(0.35), bfloat16(0.125)},
{bfloat16(-0.25), bfloat16(-0.35), bfloat16(-0.125)}});
auto rhs = LiteralUtil::CreateR2<bfloat16>(
{{bfloat16(0.5), bfloat16(0.125), bfloat16(0.125)},
{bfloat16(0.25), bfloat16(-0.375), bfloat16(-0.127)}});
auto expected =
LiteralUtil::CreateR2<bool>({{false, true, true}, {false, true, true}});
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs)));
b.AddInstruction(HloInstruction::CreateCompare(expected.shape(), c1, c2,
ComparisonDirection::kGe));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
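// Reduction carried out entirely in bf16: 1 + 3 - 2 + 42 = 44.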
TEST_P(HloEvaluatorBf16Test, Bf16Reduction) {
const std::string hlo_text = R"(
HloModule Bf16Reduction
add_bf16 (lhs: bf16[], rhs: bf16[]) -> bf16[] {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(bf16[] lhs, bf16[] rhs)
}
ENTRY main {
arg0 = bf16[4]{0} parameter(0)
init = bf16[] constant(0)
ROOT %reduce = bf16[] reduce(arg0, init), dimensions={0}, to_apply=add_bf16
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal arg = LiteralUtil::CreateR1<bfloat16>(
{bfloat16(1.0f), bfloat16(3.0f), bfloat16(-2.0f), bfloat16(42.0f)});
Literal expected = LiteralUtil::CreateR0<bfloat16>(bfloat16(44.0f));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&arg}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
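// A reduce may emit a different element type than its operands: here f32
// inputs accumulate and the bf16 result type requires a final conversion.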
TEST_F(HloEvaluatorTest, MixedPrecisionReduction) {
const std::string hlo_text = R"(
HloModule MixedPrecisionReduction
add_f32 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
arg0 = f32[4]{0} parameter(0)
init = f32[] constant(0)
ROOT %reduce = bf16[] reduce(arg0, init), dimensions={0}, to_apply=add_f32
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal arg = LiteralUtil::CreateR1<float>({1.0f, 3.0f, -2.0f, 42.0f});
Literal expected = LiteralUtil::CreateR0<bfloat16>(bfloat16(44.0f));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&arg}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
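// The next two tests verify that unimplemented ops (outfeed) reached through
// call and fusion surface as an error status instead of crashing the evaluator.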
TEST_F(HloEvaluatorTest, DontFailOnCallUnimplementedOps) {
const std::string hlo_text = R"(
HloModule DontFailOnCall
call {
token0 = token[] after-all()
constant = u32[3]{0} constant({1,2,3})
ROOT outfeed = token[] outfeed(constant, token0), outfeed_shape=u32[3]{0}
}
ENTRY main {
ROOT result = token[] call(), to_apply=call
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto statusor = Evaluate();
EXPECT_FALSE(statusor.status().ok());
}
TEST_F(HloEvaluatorTest, DontFailOnFusionWithUnimplementedOps) {
const std::string hlo_text = R"(
HloModule DontFailOnFusion
fused_computation {
token0 = token[] after-all()
constant = u32[3]{0} constant({1,2,3})
ROOT outfeed = token[] outfeed(constant, token0), outfeed_shape=u32[3]{0}
}
ENTRY main {
ROOT result = token[] fusion(), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto statusor = Evaluate();
EXPECT_FALSE(statusor.status().ok());
}
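// A full-extent slice whose result layout differs from the operand layout must
// still yield element-wise identical values.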
TEST_P(HloEvaluatorBf16Test, SliceWithDifferentLayout) {
const std::string hlo_text = R"(
HloModule SliceWithDifferentLayout
ENTRY main {
arg = f32[2,2,2]{0,1,2} parameter(0)
ROOT %slice = f32[2,2,2]{1,0,2} slice(f32[2,2,2]{0,1,2} %arg), slice={[0:2], [0:2], [0:2]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal arg = LiteralUtil::CreateR3WithLayout<float>(
{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}},
LayoutUtil::MakeLayout({0, 1, 2}));
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&arg}));
EXPECT_TRUE(LiteralTestUtil::Equal(arg, actual));
}
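// Bitcast reinterprets the buffer without touching its contents; the test is
// parameterized over f32/bf16 and compares the raw element data.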
TEST_P(HloEvaluatorBf16Test, Bitcast) {
const absl::string_view hlo_text_base = R"(
HloModule Bitcast
ENTRY main {
param = %s[32,121]{1,0} parameter(0)
ROOT bitcast = %s[121,32,1]{0,1,2} bitcast(%s[32,121]{1,0} param)
}
)";
std::string hlo_text;
if (use_bfloat16_) {
hlo_text = absl::StrFormat(hlo_text_base, "bf16", "bf16", "bf16");
} else {
hlo_text = absl::StrFormat(hlo_text_base, "f32", "f32", "f32");
}
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&args[0]}));
if (use_bfloat16_) {
EXPECT_TRUE(
absl::c_equal(args[0].data<bfloat16>(), actual.data<bfloat16>()));
} else {
EXPECT_TRUE(absl::c_equal(args[0].data<float>(), actual.data<float>()));
}
}
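// Integer arithmetic in the evaluator wraps modulo 2^32: 2^30 + 2^30
// overflows to INT32_MIN, and (2^32 - 1)^33 remains 2^32 - 1.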
TEST_F(HloEvaluatorTest, Int32Overflow) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
c1 = s32[] constant(1073741824)
sum = s32[] add(c1, c1)
c2 = s32[] constant(-2147483648)
sub = s32[] subtract(c2, c1)
c3 = u32[] constant(4294967295)
c4 = u32[] constant(33)
mul = s32[] multiply(c1, c1)
pow = u32[] power(c3, c4)
ROOT tuple = (s32[], s32[], s32[], u32[]) tuple(sum, sub, mul, pow)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(auto literal, Evaluate({}));
std::vector<Literal> actual = literal.DecomposeTuple();
ASSERT_EQ(actual.size(), 4);
uint32_t pow30 = uint32_t{1} << 30;
uint32_t pow31 = uint32_t{1} << 31;
EXPECT_EQ(actual[0].GetFirstElement<int32_t>(), static_cast<int32_t>(pow31));
EXPECT_EQ(actual[1].GetFirstElement<int32_t>(),
static_cast<int32_t>(-(pow31 + pow30)));
EXPECT_EQ(actual[2].GetFirstElement<int32_t>(),
static_cast<int32_t>(pow31 * pow31));
EXPECT_EQ(actual[3].GetFirstElement<uint32_t>(), uint32_t{4294967295});
}
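// get-dimension-size reports the dynamic size (3) attached via
// set-dimension-size; DynamicDimensionInference supplies that information.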
TEST_F(HloEvaluatorTest, GetDimensionSize) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
size = s32[] parameter(0)
data = s32[4] parameter(1)
data_dynamic = s32[<=4] set-dimension-size(data, size), dimensions={0}
sum = s32[<=4] add(data_dynamic, data)
ROOT dynamic_size = s32[] get-dimension-size(sum), dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(DynamicDimensionInference dynamic_dimension_inference,
DynamicDimensionInference::Run(m_.get()));
evaluator_.set_dynamic_dimension_inference(&dynamic_dimension_inference);
Literal size_arg = LiteralUtil::CreateR0<int32_t>(3);
Literal data_arg = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4});
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&size_arg, &data_arg}));
EXPECT_EQ(actual.GetFirstElement<int32_t>(), static_cast<int32_t>(3));
}
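// The next two tests verify argument validation for both the module- and
// computation-level Evaluate overloads: shape mismatches and wrong argument
// counts produce descriptive errors.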
TEST_F(HloEvaluatorTest, EvaluateWithWrongInputShapes) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
p0 = s32[1] parameter(0)
ROOT sum = s32[1] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal input_wrong_shape = LiteralUtil::CreateR1<int32_t>({0, 1});
EXPECT_EQ(
HloEvaluator().Evaluate(*m_, {&input_wrong_shape}).status().message(),
"Shape mismatch at parameter 0. Computation expected s32[1]{0}, "
"but arg was s32[2]{0}.");
EXPECT_EQ(HloEvaluator()
.Evaluate(*m_->entry_computation(), {&input_wrong_shape})
.status()
.message(),
"Shape mismatch at parameter 0. Computation expected s32[1]{0}, "
"but arg was s32[2]{0}.");
}
TEST_F(HloEvaluatorTest, EvaluateWithWrongNumberOfInputs) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
p0 = s32[1] parameter(0)
ROOT sum = s32[1] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal input = LiteralUtil::CreateR1<int32_t>({0});
EXPECT_EQ(HloEvaluator().Evaluate(*m_, {&input, &input}).status().message(),
"Expected 1 argument, but got 2.");
EXPECT_EQ(HloEvaluator()
.Evaluate(*m_->entry_computation(), {&input, &input})
.status()
.message(),
"Expected 1 argument, but got 2.");
}
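// The next three tests verify that fusion evaluation honors non-default
// layouts on fusion parameters and roots; a bitcast between layouts must leave
// the raw data untouched.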
TEST_F(HloEvaluatorTest, PreserveFusionInputLayout) {
const absl::string_view hlo_text = R"(
HloModule FusionInputLayout
fused_computation {
param_0 = f32[20,20]{0,1} parameter(0)
ROOT bitcast = f32[20,20]{1,0} bitcast(param_0)
}
ENTRY kernel_entry {
parameter.0 = f32[20,20]{0,1} parameter(0)
ROOT fusion = f32[20,20]{1,0} fusion(parameter.0),
kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&args[0]}));
EXPECT_TRUE(absl::c_equal(args[0].data<float>(), actual.data<float>()));
}
TEST_F(HloEvaluatorTest, PreserveFusionOutputLayout) {
const absl::string_view hlo_text = R"(
HloModule FusionOutputLayout
fused_computation {
param_0 = f32[20,20]{1,0} parameter(0)
ROOT bitcast = f32[20,20]{0,1} bitcast(param_0)
}
ENTRY kernel_entry {
parameter.0 = f32[20,20]{1,0} parameter(0)
ROOT fusion = f32[20,20]{0,1} fusion(parameter.0),
kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&args[0]}));
EXPECT_TRUE(absl::c_equal(args[0].data<float>(), actual.data<float>()));
}
TEST_F(HloEvaluatorTest, PreserveMOFusionOutputLayout) {
const absl::string_view hlo_text = R"(
HloModule MOFusionOutputLayout
fused_computation {
param_0 = f32[20,20]{1,0} parameter(0)
bitcast = f32[20,20]{0,1} bitcast(param_0)
ROOT tuple = (f32[20,20]{0,1}) tuple(bitcast)
}
ENTRY kernel_entry {
parameter.0 = f32[20,20]{1,0} parameter(0)
ROOT fusion = (f32[20,20]{0,1}) fusion(parameter.0),
kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual_tuple, Evaluate({&args[0]}));
std::vector<Literal> actual_literals = actual_tuple.DecomposeTuple();
EXPECT_TRUE(
absl::c_equal(args[0].data<float>(), actual_literals[0].data<float>()));
}
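// Custom calls are evaluated through a user-provided handler: no handler
// yields UNIMPLEMENTED, handler errors propagate, and handlers receive all
// operand literals.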
TEST_F(HloEvaluatorTest, EvaluateCustomCall_NoHandler) {
const absl::string_view hlo_text = R"(
HloModule EvaluateCustomCall_NoHandler
ENTRY kernel_entry {
parameter.0 = u32[2,2]{1,0} parameter(0)
ROOT test_root = (u32[2,2]{1,0}) custom-call(parameter.0),
custom_call_target="_my_custom_call"
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
EXPECT_EQ(HloEvaluator().Evaluate(*m_, {&args[0]}).status().code(),
::tsl::error::UNIMPLEMENTED);
}
TEST_F(HloEvaluatorTest, EvaluateCustomCall_HandlerError) {
const absl::string_view hlo_text = R"(
HloModule EvaluateCustomCall_HandlerError
ENTRY kernel_entry {
parameter.0 = u32[2,2]{1,0} parameter(0)
ROOT test_root = (u32[2,2]{1,0}) custom-call(parameter.0),
custom_call_target="_my_custom_call"
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
HloEvaluator evaluator;
evaluator.set_custom_call_handler([](const HloInstruction* custom_call,
absl::Span<const Literal*> operands) {
return Internal("Test error");
});
EXPECT_EQ(evaluator.Evaluate(*m_, {&args[0]}).status().code(),
::tsl::error::INTERNAL);
}
TEST_F(HloEvaluatorTest, EvaluateCustomCall_ManyInputs) {
const absl::string_view hlo_text = R"(
HloModule EvaluateCustomCall_ManyInputs
ENTRY kernel_entry {
parameter.0 = u32[1]{0} parameter(0)
parameter.1 = u32[1]{0} parameter(1)
ROOT test_root = u32[1]{0} custom-call(parameter.0, parameter.1),
custom_call_target="_my_custom_call"
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
HloEvaluator evaluator;
evaluator.set_custom_call_handler([](const HloInstruction* custom_call,
absl::Span<const Literal*> operands) {
EXPECT_EQ(HloOpcode::kCustomCall, custom_call->opcode());
EXPECT_EQ("_my_custom_call", custom_call->custom_call_target());
EXPECT_EQ(2, custom_call->operand_count());
EXPECT_EQ(2, operands.size());
auto output = Literal::CreateFromShape(custom_call->shape());
auto operand0_data = operands[0]->data<uint32_t>();
auto operand1_data = operands[1]->data<uint32_t>();
auto output_data = output.data<uint32_t>();
output_data[0] = operand0_data[0] + operand1_data[0];
return output;
});
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
evaluator.Evaluate(*m_->entry_computation(), {&args[0], &args[1]}));
auto arg0_data = args[0].data<uint32_t>();
auto arg1_data = args[1].data<uint32_t>();
std::vector<uint32_t> expected_data = {arg0_data[0] + arg1_data[0]};
EXPECT_TRUE(absl::c_equal(expected_data, actual_literal.data<uint32_t>()));
}
TEST_F(HloEvaluatorTest, EvaluateCustomCallInFusion) {
const absl::string_view hlo_text = R"(
fusion1 {
p = f32[] parameter(0)
ROOT c = f32[] custom-call(p), custom_call_target="__cchandler1"
}
ENTRY e {
p = f32[] parameter(0)
ROOT f = f32[] fusion(p), kind=kCustom, calls=fusion1
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto input = LiteralUtil::CreateR0<float>(0);
HloEvaluator evaluator;
evaluator.set_custom_call_handler([](const HloInstruction* custom_call,
absl::Span<const Literal*> operands) {
return LiteralUtil::CreateR0<float>(1 -
operands[0]->GetFirstElement<float>());
});
TF_ASSERT_OK_AND_ASSIGN(auto output, evaluator.Evaluate(*m_, {&input}));
EXPECT_EQ(output, LiteralUtil::CreateR0<float>(1));
}
TEST_F(HloEvaluatorTest, IsFiniteF16) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY IsFiniteTest {
c = f16[6] constant({nan, 7, nan, -1, inf, -inf})
ROOT is-finite = pred[6] is-finite(c)
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_THAT(actual_literal.data<bool>(),
::testing::ElementsAre(false, true, false, true, false, false));
}
TEST_F(HloEvaluatorTest, IsFiniteBf16) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY IsFiniteTest {
c = bf16[6] constant({nan, 7, nan, -1, inf, -inf})
ROOT is-finite = pred[6] is-finite(c)
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_THAT(actual_literal.data<bool>(),
::testing::ElementsAre(false, true, false, true, false, false));
}
TEST_F(HloEvaluatorTest, ZeroSizedIotaWithHugeDimension) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY t {
ROOT i = f32[1000000000000, 0] iota(), iota_dimension=0
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_THAT(actual_literal.data<float>(), ::testing::IsEmpty());
}
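// A copy-start/copy-done pair evaluates to a plain copy of the operand.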
TEST_F(HloEvaluatorTest, CopyStartCopyDone) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY CopyStartCopyDone {
init = f32[] constant(42.0)
copy-start = (f32[]{:S(1)}, f32[], u32[]) copy-start(init)
ROOT copy-done = f32[] copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR0<float>(42.0f);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, CopyDifferentTypes) {
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY CopyDifferentTypes {
c = bf16[3] constant({1, 2, 3})
ROOT copy = f32[3] copy(bf16[3] c)
}
)"));
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({1.f, 2.f, 3.f}), result));
}
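// Async wrapper ops (negate-start/update/done) are evaluated synchronously,
// producing the wrapped computation's result.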
TEST_F(HloEvaluatorTest, AsyncOps) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY AsyncOps {
init = f32[] constant(42.0)
async-start = ((f32[]), f32[], u32[]) negate-start(init)
async-update = ((f32[]), f32[], u32[]) negate-update(async-start)
ROOT async-done = f32[] negate-done(async-update)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR0<float>(-42.0f);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
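// The next four tests exercise map over bf16, s16, u16, and mixed-type
// operands, converting each result to f32.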
TEST_F(HloEvaluatorTest, MapBF16) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p = bf16[] parameter(0)
add = bf16[] add(p, p)
ROOT conv = f32[] convert(add)
}
ENTRY CopyStartCopyDone {
c = bf16[3] constant({1, 2, 3})
ROOT map = f32[3] map(c), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, MapS16) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p = s16[] parameter(0)
add = s16[] add(p, p)
ROOT conv = f32[] convert(add)
}
ENTRY CopyStartCopyDone {
c = s16[3] constant({1, 2, 3})
ROOT map = f32[3] map(c), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, MapU16) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p = u16[] parameter(0)
add = u16[] add(p, p)
ROOT conv = f32[] convert(add)
}
ENTRY CopyStartCopyDone {
c = u16[3] constant({1, 2, 3})
ROOT map = f32[3] map(c), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, MapMixed) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p0 = u16[] parameter(0)
p1 = f32[] parameter(1)
c0 = f32[] convert(p0)
ROOT add = f32[] add(c0, p1)
}
ENTRY CopyStartCopyDone {
c0 = u16[3] constant({1, 2, 3})
c1 = f32[3] constant({1.5, 2.5, 3.5})
ROOT map = f32[3] map(c0, c1), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.5f, 4.5f, 6.5f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
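// s16 x s8 dot with an s32 result: operands are upcast before the
// multiply-accumulate.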
TEST_F(HloEvaluatorTest, DotUpcast) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY DotUpcast {
l = s16[4,3]{1,0} parameter(0)
r = s8[3,2]{1,0} parameter(1)
ROOT result = s32[4,2] dot(l, r), lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
)";
auto lhs_array = std::make_unique<Array2D<int16_t>>(4, 3);
lhs_array->FillUnique(1);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<int16_t>(*lhs_array);
auto rhs_array = std::make_unique<Array2D<int8_t>>(3, 2);
rhs_array->FillUnique(1);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<int8_t>(*rhs_array);
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&lhs_literal, &rhs_literal}));
auto expected_array =
Array2D<int32_t>({{22, 28}, {58, 76}, {94, 124}, {130, 172}});
auto expected = LiteralUtil::CreateR2FromArray2D<int32_t>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
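// Sorts complex values using a comparator defined on their real parts.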
TEST_F(HloEvaluatorTest, SortC64) {
const absl::string_view hlo_text = R"(
HloModule m
sort_lt_comparator {
parameter.0 = c64[] parameter(0)
real.0 = f32[] real(parameter.0)
parameter.1 = c64[] parameter(1)
real.1 = f32[] real(parameter.1)
ROOT compare = pred[] compare(real.0, real.1), direction=LT
}
ENTRY main {
c = c64[3] constant({(2, 0), (4, 0), (6, 0)})
ROOT sort = c64[3]{0} sort(c), dimensions={0}, to_apply=sort_lt_comparator
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected =
LiteralUtil::CreateR1<std::complex<float>>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, ConvertC128ToC64) {
const absl::string_view hlo_text = R"(
HloModule m
ENTRY main {
c = c128[3] constant({(2, 0), (4, 0), (6, 0)})
ROOT sort = c64[3]{0} convert(c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected =
LiteralUtil::CreateR1<std::complex<float>>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
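// The tests below use recursive evaluation, which computes an instruction from
// its operands on demand and succeeds as long as every value it actually
// touches is computable.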
TEST_F(HloEvaluatorTest, RecursivelyEvaluateNonConstantOperands) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Literal c1_literal = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
Literal c2_literal = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* c1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c1_literal)));
HloInstruction* c2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c2_literal)));
HloInstruction* add0 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, c0, c1));
HloInstruction* add1 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, c1, c2));
HloInstruction* add2 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, add1));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{2, 16}, {6, 16}});
TestRecursivelyEvaluateInstruction(add2, expected);
}
TEST_F(HloEvaluatorTest, GetTupleElementOnPartiallyKnownTupleSucceeds) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* tuple =
b.AddInstruction(HloInstruction::CreateTuple({p0, p1, c0}));
HloInstruction* gte =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple, 2));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
TestRecursivelyEvaluateInstruction(gte, expected);
}
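// Recursive evaluation must fail cleanly when the requested value depends on
// something unevaluatable (an infeed, or a tuple element backed by a
// parameter).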
TEST_F(HloEvaluatorTest, InfeedFailure) {
HloComputation::Builder b(TestName());
HloInstruction* token = b.AddInstruction(HloInstruction::CreateToken());
HloInstruction* infeed = b.AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {4, 4}), token, ""));
m_->AddEntryComputation(b.Build());
TestRecursiveEvaluationFailure(infeed);
}
TEST_F(HloEvaluatorTest, GetUnknownTupleElementFails) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* tuple =
b.AddInstruction(HloInstruction::CreateTuple({p0, p1, c0}));
HloInstruction* gte =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple, 0));
m_->AddEntryComputation(b.Build());
TestRecursiveEvaluationFailure(gte);
}
TEST_F(HloEvaluatorTest, GetTupleElementFromNestedTupleSucceeds) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* tuple0 =
b.AddInstruction(HloInstruction::CreateTuple({p0, c0}));
HloInstruction* tuple1 =
b.AddInstruction(HloInstruction::CreateTuple({tuple0, p1}));
HloInstruction* gte0 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple1, 0));
HloInstruction* gte1 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(gte0, 1));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
TestRecursivelyEvaluateInstruction(gte1, expected);
}
TEST_F(HloEvaluatorTest, GetTupleElementInterleavedWithTupleSucceeds) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* p2 =
b.AddInstruction(HloInstruction::CreateParameter(2, shape, "param.2"));
HloInstruction* tuple0 =
b.AddInstruction(HloInstruction::CreateTuple({p0, c0}));
HloInstruction* tuple1 =
b.AddInstruction(HloInstruction::CreateTuple({tuple0, p1}));
HloInstruction* gte0 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple1, 0));
HloInstruction* tuple2 =
b.AddInstruction(HloInstruction::CreateTuple({gte0, p2}));
HloInstruction* gte1 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple2, 0));
HloInstruction* gte2 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(gte1, 1));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
TestRecursivelyEvaluateInstruction(gte2, expected);
}
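// With recursive evaluation, a parameter of a called computation resolves to
// the constant argument at its call site.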
TEST_F(HloEvaluatorTest, ParameterThroughCallSucceeds) {
constexpr absl::string_view kHloModule = R"(
HloModule parameter_through_call
%identity {
ROOT %param = s32[] parameter(0)
}
ENTRY parameter_through_call {
%constant = s32[] constant(42)
ROOT %call = s32[] call(s32[] %constant), to_apply=%identity
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
const HloInstruction* parameter_instruction = nullptr;
for (const auto* computation : hlo_module->computations()) {
for (const auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kParameter) {
parameter_instruction = instruction;
}
}
}
ASSERT_NE(parameter_instruction, nullptr);
Literal expected = LiteralUtil::CreateR0<int32_t>(42);
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator_.Evaluate(parameter_instruction, {},
/*recursively_evaluate=*/true));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, ParameterThroughCallSucceedsWithPrecomputation) {
constexpr absl::string_view kHloModule = R"(
HloModule parameter_through_call
%identity {
ROOT %param = s32[] parameter(0)
}
ENTRY parameter_through_call {
%constant = s32[] constant(42)
ROOT %call = s32[] call(s32[] %constant), to_apply=%identity
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
const HloInstruction* parameter_instruction = nullptr;
for (const auto* computation : hlo_module->computations()) {
for (const auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kParameter) {
parameter_instruction = instruction;
}
}
}
ASSERT_NE(parameter_instruction, nullptr);
Literal expected = LiteralUtil::CreateR0<int32_t>(42);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<TuplePointsToAnalysis> tuple_points_to,
TuplePointsToAnalysis::Run(hlo_module.get()));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module.get());
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator_.Evaluate(parameter_instruction,
{tuple_points_to.get(), call_graph.get()},
/*recursively_evaluate=*/true));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
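// Tests for PatternMatchParseWhileLoop, which statically recovers a while
// loop's trip count, induction variable, step size, and bound from its
// condition and body computations.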
class PatternMatchParseWhileLoopTest : public HloTestBase {};
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundDefinedInsideOfCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%loop_bound = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %loop_bound), direction=LT
}
%while_body {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 5);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 5);
}
TEST_F(PatternMatchParseWhileLoopTest,
LoopBoundDefinedInsideOfCondWithPrecomputation) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%loop_bound = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %loop_bound), direction=LT
}
%while_body {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<TuplePointsToAnalysis> tuple_points_to,
TuplePointsToAnalysis::Run(hlo_module.get()));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module.get());
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop = PatternMatchParseWhileLoop(
while_op, {tuple_points_to.get(), call_graph.get()});
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 5);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 5);
}
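// The loop bound is threaded through the while-state tuple (index 1) rather
// than being a constant inside the condition computation.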
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundDefinedOutsideOfCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %constant.1, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
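// The bound is the product of two constants (10 * 4) computed in the entry
// computation, so the matcher must fold it to 40.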
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundComputedOutsideOfCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(10)
%constant.2 = s32[] constant(4)
%loop_bound = s32[] multiply(s32[] %constant.1, s32[] %constant.2)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 40);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 40);
}
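// A step size of 4 against a bound of 40 should give a trip count of 10.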
TEST_F(PatternMatchParseWhileLoopTest, StepSizeNotOne) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(4)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(10)
%constant.2 = s32[] constant(4)
%loop_bound = s32[] multiply(s32[] %constant.1, s32[] %constant.2)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 4);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 40);
}
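// The condition delegates to a nested call computation; the matcher must look
// through the call and the get-tuple-element to reach the compare.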
TEST_F(PatternMatchParseWhileLoopTest, RecursiveCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%compute_pred {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
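// As above, but with two levels of call indirection in the condition.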
TEST_F(PatternMatchParseWhileLoopTest, RecursiveCondGetTupleElement) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%compute_pred {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%get_tuple_element {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred
%gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
ROOT %tuple.1 = (pred[]) tuple(pred[] %gte.4)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%get_tuple_element
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
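// The second loop's bound is the final induction value of the first loop;
// since the first loop is itself static, the matcher still resolves a bound
// of 10 for the second.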
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundDependsOnAnotherLoop) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%compute_pred.0 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%while_condition.0 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred.0
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body.0 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
%compute_pred.1 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%while_condition.1 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred.1
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body.1 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%param.2 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(0)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init.0 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while.0 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init.0), condition=%while_condition.0, body=%while_body.0
%result.0 = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while.0), index=3
%new_loop_bound = s32[] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while.0), index=0
%while_init.1 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %new_loop_bound, f32[1024, 1024] %param.2, f32[1024, 1024] %result.0)
%while.1 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init.1), condition=%while_condition.1, body=%while_body.1
ROOT %result.1 = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while.1), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
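// The induction variable is initialized from an entry parameter, so the trip
// count cannot be determined statically and the loop is reported as dynamic.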
TEST_F(PatternMatchParseWhileLoopTest, DynamicLoop) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%param.2 = s32[] parameter(1)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %param.2, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_TRUE(parsed_while_loop->is_dynamic());
}
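// A boolean flag condition that the body immediately sets to false
// canonicalizes to a static single-iteration loop.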
TEST_F(PatternMatchParseWhileLoopTest, BooleanCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (pred[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
ROOT %gte.0 = pred[] get-tuple-element(%param), index=0
}
%while_body {
%param = (pred[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = pred[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%new_loop_cond = pred[] constant(false)
ROOT %loop_result = (pred[], f32[1024, 1024], f32[1024, 1024]) tuple(%new_loop_cond, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = pred[] constant(true)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (pred[], f32[1024, 1024], f32[1024, 1024]) tuple(pred[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (pred[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((pred[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 1);
}
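// The matcher is applied to the inner loop only: its bound (5 * 4 = 20) is
// static even though the outer loop's bound depends on a parameter.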
TEST_F(PatternMatchParseWhileLoopTest, NestedLoop) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%nested_while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%nested_while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
%while_condition {
%param = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = s32[] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024, 1024] get-tuple-element(%param), index=4
%constant.4 = s32[] constant(0)
%nested_while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.4, s32[] %gte.2, f32[1024, 1024] %gte.3, f32[1024, 1024] %gte.4)
%nested_while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%nested_while_init), condition=%nested_while_condition, body=%nested_while_body
%nested_while_result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %nested_while), index=3
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %gte.3, %nested_while_result)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%param.2 = s32[] parameter(1)
%constant.0 = s32[] constant(0)
%constant.2 = s32[] constant(4)
%loop_bound = s32[] multiply(s32[] %param.2, s32[] %constant.2)
%constant.3 = s32[] constant(5)
%nested_loop_bound = s32[] multiply(s32[] %constant.3, s32[] %constant.2)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, s32[] %nested_loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=4
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
HloComputation* while_body = while_op->while_body();
HloInstruction* nested_while =
while_body->root_instruction()->mutable_operand(4)->mutable_operand(0);
CHECK_EQ(nested_while->opcode(), HloOpcode::kWhile);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(nested_while);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 20);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 20);
}
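// Copies of the induction variable and of the compare result inside the
// condition should be looked through.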
TEST_F(PatternMatchParseWhileLoopTest, CopiedLoopCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%copy.0 = s32[] copy(s32[] %gte.0)
%loop_bound = s32[] constant(5)
%result = pred[] compare(%gte.0, %loop_bound), direction=LT
ROOT %copy.1 = pred[] copy(pred[] %result)
}
%while_body {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 5);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 5);
}
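// Verifies that the evaluator's MAC trace handler observes the flat
// (result, lhs, rhs) index triple of every multiply-accumulate performed by
// an s16 x s8 -> s32 dot.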
TEST_F(HloEvaluatorTest, DotTraced) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY DotUpcast {
l = s16[4,3]{1,0} parameter(0)
r = s8[3,2]{1,0} parameter(1)
ROOT result = s32[4,2] dot(l, r), lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
)";
auto lhs_array = std::make_unique<Array2D<int16_t>>(4, 3);
lhs_array->FillUnique(1);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<int16_t>(*lhs_array);
auto rhs_array = std::make_unique<Array2D<int8_t>>(3, 2);
rhs_array->FillUnique(1);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<int8_t>(*rhs_array);
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
absl::flat_hash_set<std::array<int64_t, 3>> macs_traced;
auto mac_handler = [&macs_traced](int64_t result_index, int64_t lhs_index,
int64_t rhs_index) -> void {
macs_traced.insert(
std::array<int64_t, 3>{result_index, lhs_index, rhs_index});
};
evaluator_.set_trace_mac_handler(mac_handler);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&lhs_literal, &rhs_literal}));
auto expected_array =
Array2D<int32_t>({{22, 28}, {58, 76}, {94, 124}, {130, 172}});
auto expected = LiteralUtil::CreateR2FromArray2D<int32_t>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
const absl::flat_hash_set<std::array<int64_t, 3>> macs_expected = {
{1, 0, 1}, {0, 0, 0}, {2, 4, 2}, {5, 6, 1}, {2, 5, 4}, {4, 7, 2},
{2, 3, 0}, {5, 7, 3}, {5, 8, 5}, {4, 6, 0}, {6, 9, 0}, {7, 10, 3},
{7, 11, 5}, {1, 1, 3}, {0, 2, 4}, {3, 4, 3}, {1, 2, 5}, {7, 9, 1},
{6, 10, 2}, {6, 11, 4}, {3, 5, 5}, {4, 8, 4}, {0, 1, 2}, {3, 3, 1}};
EXPECT_EQ(macs_traced, macs_expected);
}
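// The same MAC tracing applied to a hand-built 2x2 convolution with high
// padding, where border cells accumulate fewer products.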
TEST_F(HloEvaluatorTest, SimpleConvTraced) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 4, 4});
b.AddInstruction(HloInstruction::CreateConvolve(
shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
/*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
absl::flat_hash_set<std::array<int64_t, 3>> macs_traced;
auto mac_handler = [&macs_traced](int64_t result_index, int64_t lhs_index,
int64_t rhs_index) -> void {
macs_traced.insert(
std::array<int64_t, 3>{result_index, lhs_index, rhs_index});
};
evaluator_.set_trace_mac_handler(mac_handler);
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 4, 4);
expected_array.FillWithYX(Array2D<float>({
{100, 126, 152, 76},
{204, 230, 256, 124},
{308, 334, 360, 172},
{149, 160, 171, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
const absl::flat_hash_set<std::array<int64_t, 3>> macs_expected = {
{10, 14, 2}, {7, 7, 0}, {11, 15, 2}, {4, 4, 0}, {3, 7, 2},
{5, 9, 2}, {8, 9, 1}, {12, 12, 0}, {6, 10, 2}, {5, 6, 1},
{13, 14, 1}, {15, 15, 0}, {11, 11, 0}, {0, 5, 3}, {10, 10, 0},
{2, 7, 3}, {13, 13, 0}, {1, 6, 3}, {0, 0, 0}, {4, 9, 3},
{8, 12, 2}, {8, 13, 3}, {9, 9, 0}, {6, 7, 1}, {9, 13, 2},
{2, 6, 2}, {0, 1, 1}, {6, 6, 0}, {5, 10, 3}, {10, 15, 3},
{14, 14, 0}, {7, 11, 2}, {0, 4, 2}, {10, 11, 1}, {6, 11, 3},
{2, 2, 0}, {3, 3, 0}, {9, 14, 3}, {12, 13, 1}, {1, 5, 2},
{5, 5, 0}, {14, 15, 1}, {1, 1, 0}, {2, 3, 1}, {4, 5, 1},
{4, 8, 2}, {9, 10, 1}, {8, 8, 0}, {1, 2, 1},
};
EXPECT_EQ(macs_traced, macs_expected);
}
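// ParseEvalErrorDetail should decode the EvalErrorDetail payload attached to
// a status, and return nullopt when no payload is present.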
TEST(EvalErrorTest, OK) {
EXPECT_EQ(std::nullopt, internal::ParseEvalErrorDetail(absl::OkStatus()));
}
TEST(EvalErrorTest, NoPayload) {
EXPECT_EQ(std::nullopt,
internal::ParseEvalErrorDetail(absl::InternalError("hmm")));
}
TEST(EvalErrorTest, Payload) {
absl::Status s = absl::InternalError("hmm");
std::string payload;
payload.resize(sizeof(internal::EvalErrorDetail));
absl::little_endian::Store32(
const_cast<char*>(payload.data()),
static_cast<uint32_t>(
internal::EvalErrorDetail::kDynamicValueDependence));
s.SetPayload(internal::kEvalErrorDetailUrl, absl::Cord(payload));
EXPECT_EQ(internal::ParseEvalErrorDetail(s),
internal::EvalErrorDetail::kDynamicValueDependence);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/evaluator/hlo_evaluator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04380fb2-2d79-479f-a1d9-115cff0d4df9 | cpp | google/arolla | aggregation | arolla/expr/operators/aggregation.cc | arolla/qexpr/operators/aggregation/aggregation_test.cc | #include "arolla/expr/operators/aggregation.h"
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::IsDefaultEdgeArg;
using ::arolla::expr::IsGroupScalarEdge;
TakeOperator::TakeOperator()
: BasicExprOperator(
"array.take",
ExprOperatorSignature(
{{"x"},
{"ids"},
{.name = "over", .default_value = TypedValue::FromValue(kUnit)},
{.name = "ids_over",
.default_value = TypedValue::FromValue(kUnit)}}),
"",
FingerprintHasher("arolla::expr_operators::TakeOperator").Finish()) {}
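// Lowering rules: with edges to scalar the op becomes a plain array.at; when
// both arguments share one edge it becomes array._take_over; otherwise it
// becomes array._take_over_over.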
absl::StatusOr<ExprNodePtr> TakeOperator::ToLowerLevel(
const ExprNodePtr& node) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
const auto& node_deps = node->node_deps();
DCHECK_GE(node_deps.size(), 4);
const ExprNodePtr& values = node_deps[0];
const ExprNodePtr& offsets = node_deps[1];
ExprNodePtr values_edge = node_deps[2];
ExprNodePtr offsets_edge = node_deps[3];
bool is_scalar_values_edge = IsDefaultEdgeArg(values_edge);
if (!is_scalar_values_edge) {
ASSIGN_OR_RETURN(is_scalar_values_edge, IsGroupScalarEdge(values_edge));
}
bool is_scalar_offsets_edge = IsDefaultEdgeArg(offsets_edge);
if (!is_scalar_offsets_edge) {
ASSIGN_OR_RETURN(is_scalar_offsets_edge, IsGroupScalarEdge(offsets_edge));
}
if (is_scalar_values_edge != is_scalar_offsets_edge) {
return absl::InvalidArgumentError(absl::StrFormat(
"Two edges must share the parent side but only one of them is an edge "
"to scalar. is_scalar_values_edge(=%d) != is_scalar_offsets_edge(=%d)",
is_scalar_values_edge, is_scalar_offsets_edge));
}
if (is_scalar_values_edge) {
return CallOp("array.at", {values, offsets});
}
if (values_edge->fingerprint() == offsets_edge->fingerprint()) {
return CallOp("array._take_over", {values, offsets, values_edge});
}
return CallOp("array._take_over_over",
{values, offsets, values_edge, offsets_edge});
}
absl::StatusOr<QTypePtr> TakeOperator::GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const {
return input_qtypes[0];
}
} | #include <cmath>
#include <cstdint>
#include <limits>
#include <optional>
#include <set>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/aggregation_ops_interface.h"
#include "arolla/qexpr/operators/aggregation/group_op_accumulators.h"
#include "arolla/util/bytes.h"
#include "arolla/util/meta.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::testing::FloatEq;
using ::testing::HasSubstr;
struct TestAccumulator : Accumulator<AccumulatorType::kAggregator, int,
meta::type_list<>, meta::type_list<int>> {
explicit TestAccumulator(int init = 0) : init_val(init) {}
void Reset() final { res = init_val; }
void Add(int v) final { res += v; }
int GetResult() final { return res; }
int init_val;
int res;
};
struct TestAccumulator2 : public TestAccumulator {
static TestAccumulator2 Create(int init = 0) {
return TestAccumulator2(init);
}
static absl::StatusOr<TestAccumulator2> Create(absl::string_view init) {
int init_val;
if (!absl::SimpleAtoi(init, &init_val)) {
return absl::InvalidArgumentError(
absl::Substitute("Expected integer, got '$0'", init));
}
return TestAccumulator2(init_val);
}
private:
explicit TestAccumulator2(int init) : TestAccumulator(init) {}
};
TEST(Accumulator, AddN) {
TestAccumulator acc;
acc.Reset();
acc.AddN(10, 5);
EXPECT_EQ(acc.GetResult(), 50);
}
TEST(OpInterface, CreateWithConstructor) {
ASSERT_OK_AND_ASSIGN(TestAccumulator default_accumulator,
CreateAccumulator<TestAccumulator>());
EXPECT_EQ(default_accumulator.init_val, 0);
ASSERT_OK_AND_ASSIGN(TestAccumulator init_accumulator,
CreateAccumulator<TestAccumulator>(5));
EXPECT_EQ(init_accumulator.init_val, 5);
}
TEST(OpInterface, CreateWithMethod) {
ASSERT_OK_AND_ASSIGN(TestAccumulator2 default_accumulator,
CreateAccumulator<TestAccumulator2>());
EXPECT_EQ(default_accumulator.init_val, 0);
ASSERT_OK_AND_ASSIGN(TestAccumulator2 init_accumulator,
CreateAccumulator<TestAccumulator2>("5"));
EXPECT_EQ(init_accumulator.init_val, 5);
EXPECT_THAT(CreateAccumulator<TestAccumulator2>("foo"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Expected integer, got 'foo'")));
}
TEST(Accumulator, LogicalAdd) {
LogicalAllAggregator acc;
acc.Reset();
EXPECT_EQ(acc.GetResult(), true);
acc.Reset();
acc.AddN(2, std::nullopt);
EXPECT_EQ(acc.GetResult(), std::nullopt);
acc.Reset();
acc.AddN(2, std::nullopt);
acc.Add(false);
EXPECT_EQ(acc.GetResult(), false);
acc.Reset();
acc.Add(std::nullopt);
acc.AddN(2, true);
EXPECT_EQ(acc.GetResult(), std::nullopt);
acc.Reset();
acc.AddN(2, true);
EXPECT_EQ(acc.GetResult(), true);
}
TEST(Accumulator, LogicalOr) {
LogicalAnyAggregator acc;
acc.Reset();
EXPECT_EQ(acc.GetResult(), false);
acc.Reset();
acc.AddN(2, std::nullopt);
EXPECT_EQ(acc.GetResult(), std::nullopt);
acc.Reset();
acc.AddN(2, std::nullopt);
acc.Add(false);
EXPECT_EQ(acc.GetResult(), std::nullopt);
acc.Reset();
acc.Add(std::nullopt);
acc.AddN(2, true);
EXPECT_EQ(acc.GetResult(), true);
acc.Reset();
acc.AddN(2, true);
EXPECT_EQ(acc.GetResult(), true);
}
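// InverseMappingAccumulator inverts a permutation: missing inputs stay
// missing, and out-of-range or duplicated elements are reported through
// GetStatus().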
TEST(Accumulator, InverseMapping) {
InverseMappingAccumulator acc;
acc.Add(1);
acc.Add(3);
acc.Add(2);
acc.Add(0);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), int64_t{3});
EXPECT_EQ(acc.GetResult(), int64_t{0});
EXPECT_EQ(acc.GetResult(), int64_t{2});
EXPECT_EQ(acc.GetResult(), int64_t{1});
EXPECT_EQ(acc.GetStatus(), absl::OkStatus());
acc.Reset();
acc.Add(std::nullopt);
acc.Add(4);
acc.Add(0);
acc.Add(std::nullopt);
acc.Add(2);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), int64_t{2});
EXPECT_EQ(acc.GetResult(), std::nullopt);
EXPECT_EQ(acc.GetResult(), int64_t{4});
EXPECT_EQ(acc.GetResult(), std::nullopt);
EXPECT_EQ(acc.GetResult(), int64_t{1});
EXPECT_EQ(acc.GetStatus(), absl::OkStatus());
acc.Reset();
acc.Add(0);
acc.Add(2);
acc.FinalizeFullGroup();
acc.GetResult();
acc.GetResult();
EXPECT_THAT(
acc.GetStatus(),
StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"unable to compute array.inverse_mapping: invalid permutation, "
"element 2 is not a valid element of a permutation of size 2")));
acc.Reset();
EXPECT_THAT(
acc.GetStatus(),
StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"unable to compute array.inverse_mapping: invalid permutation, "
"element 2 is not a valid element of a permutation of size 2")));
acc.Reset();
acc.Add(0);
acc.Add(0);
acc.FinalizeFullGroup();
acc.GetResult();
acc.GetResult();
EXPECT_THAT(
acc.GetStatus(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"unable to compute array.inverse_mapping: invalid permutation, "
"element 0 appears twice in the permutation")));
}
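// GroupByAccumulator hands out a fresh id from the shared counter for each
// key not yet seen in the current group; Reset() starts a new group.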
TEST(Accumulator, GroupBy) {
int64_t group_counter = 10;
GroupByAccumulator<float> acc(&group_counter);
acc.Reset();
acc.Add(2.0f);
EXPECT_EQ(acc.GetResult(), 10);
acc.Add(3.0f);
EXPECT_EQ(acc.GetResult(), 11);
acc.Add(2.0f);
EXPECT_EQ(acc.GetResult(), 10);
acc.Reset();
acc.Add(3.0f);
EXPECT_EQ(acc.GetResult(), 12);
acc.Add(2.0f);
EXPECT_EQ(acc.GetResult(), 13);
acc.Add(3.0f);
EXPECT_EQ(acc.GetResult(), 12);
acc.Add(2.0f);
EXPECT_EQ(acc.GetResult(), 13);
}
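// ArrayTakeOverAccumulator gathers values by offset within each group;
// out-of-range offsets surface through GetStatus().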
TEST(Accumulator, PermuteInt) {
ArrayTakeOverAccumulator<int> acc;
acc.Add(0, 2);
acc.Add(1, 0);
acc.Add(2, 1);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 1);
EXPECT_EQ(acc.GetStatus(), absl::OkStatus());
acc.Reset();
acc.Add(10, std::nullopt);
acc.Add(std::nullopt, 1);
acc.Add(20, 0);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), std::nullopt);
EXPECT_EQ(acc.GetResult(), std::nullopt);
EXPECT_EQ(acc.GetResult(), 10);
EXPECT_EQ(acc.GetStatus(), absl::OkStatus());
acc.Reset();
acc.Add(0, 0);
acc.Add(1, 2);
acc.FinalizeFullGroup();
acc.GetResult();
acc.GetResult();
EXPECT_THAT(acc.GetStatus(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid offsets: 2 is not a valid offset of "
"an array of size 2")));
acc.Reset();
EXPECT_THAT(acc.GetStatus(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid offsets: 2 is not a valid offset of "
"an array of size 2")));
}
TEST(Accumulator, PermuteBytes) {
ArrayTakeOverAccumulator<Bytes> acc;
std::vector<std::pair<OptionalValue<Bytes>, OptionalValue<int64_t>>> inputs(
{{Bytes("the"), 4},
{Bytes("clone"), 0},
{Bytes("war"), 1},
{Bytes("has"), 2},
{Bytes("begun"), 3}});
for (const auto& add : inputs) {
acc.Add(add.first, add.second);
}
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), "begun");
EXPECT_EQ(acc.GetResult(), "the");
EXPECT_EQ(acc.GetResult(), "clone");
EXPECT_EQ(acc.GetResult(), "war");
EXPECT_EQ(acc.GetResult(), "has");
EXPECT_EQ(acc.GetStatus(), absl::OkStatus());
}
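// WeightedCDFAccumulator emits, for each input, the total weight of elements
// less than or equal to it, normalized by the group's total weight.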
TEST(Accumulator, CDF) {
WeightedCDFAccumulator<float, float> acc;
acc.Add(0.1, 0.1);
acc.Add(0.2, 0.2);
acc.Add(0.20001, 0.1);
acc.Add(0.1, 0.2);
acc.Add(-0.1, 0.3);
acc.Add(-0.2, 0.1);
acc.FinalizeFullGroup();
EXPECT_THAT(acc.GetResult(), FloatEq(0.7));
EXPECT_THAT(acc.GetResult(), FloatEq(0.9));
EXPECT_THAT(acc.GetResult(), FloatEq(1));
EXPECT_THAT(acc.GetResult(), FloatEq(0.7));
EXPECT_THAT(acc.GetResult(), FloatEq(0.4));
EXPECT_THAT(acc.GetResult(), FloatEq(0.1));
acc.Reset();
acc.Add(1, 1);
acc.Add(0, 1);
acc.FinalizeFullGroup();
EXPECT_THAT(acc.GetResult(), FloatEq(1));
EXPECT_THAT(acc.GetResult(), FloatEq(0.5));
acc.Reset();
acc.FinalizeFullGroup();
}
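// Stress case with 20 million weighted points: the 18 million copies of 0.0
// account for 90% of the total weight, which the summation must not lose.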
TEST(Accumulator, CDFBig) {
WeightedCDFAccumulator<float, float> acc;
for (int i = 0; i < 18000000; ++i) {
acc.Add(0.0, 1.0);
}
for (int i = 0; i < 2000000; ++i) {
acc.Add(i, 1.0);
}
acc.FinalizeFullGroup();
EXPECT_THAT(acc.GetResult(), FloatEq(0.9));
}
TEST(Accumulator, OrdinalRank) {
OrdinalRankAccumulator<float, int64_t> acc;
acc.Add(7, 10);
acc.Add(7, 9);
acc.Add(1, 7);
acc.Add(2, 10);
acc.Add(2, 11);
acc.Add(2, 10);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), 5);
EXPECT_EQ(acc.GetResult(), 4);
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 1);
EXPECT_EQ(acc.GetResult(), 3);
EXPECT_EQ(acc.GetResult(), 2);
}
TEST(Accumulator, OrdinalRank_Descending) {
OrdinalRankAccumulator<float, int> acc(true);
acc.Add(7, 10);
acc.Add(7, 9);
acc.Add(std::numeric_limits<float>::quiet_NaN(), 10);
acc.Add(1, 10);
acc.Add(2, 10);
acc.Add(2, 10);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), 1);
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 5);
EXPECT_EQ(acc.GetResult(), 4);
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 3);
}
TEST(Accumulator, DenseRank) {
DenseRankAccumulator<int> acc;
acc.Add(7);
acc.Add(7);
acc.Add(1);
acc.Add(2);
acc.Add(2);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 1);
EXPECT_EQ(acc.GetResult(), 1);
acc.Reset();
acc.Add(3);
acc.Add(0);
acc.Add(2);
acc.Add(1);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), 3);
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 1);
}
TEST(Accumulator, DenseRankWithNan) {
DenseRankAccumulator<float> acc;
acc.Add(7);
acc.Add(2);
acc.Add(std::numeric_limits<float>::quiet_NaN());
acc.Add(7);
acc.Add(1);
acc.Add(std::numeric_limits<float>::quiet_NaN());
acc.Add(2);
acc.FinalizeFullGroup();
std::set<int64_t> ranks_of_nan;
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 1);
ranks_of_nan.insert(acc.GetResult());
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 0);
ranks_of_nan.insert(acc.GetResult());
EXPECT_EQ(acc.GetResult(), 1);
EXPECT_EQ(ranks_of_nan, (std::set<int64_t>{3, 4}));
}
TEST(Accumulator, DenseRank_Descending) {
DenseRankAccumulator<float> acc(true);
acc.Add(7);
acc.Add(7);
acc.Add(1);
acc.Add(2);
acc.Add(2);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 1);
EXPECT_EQ(acc.GetResult(), 1);
acc.Reset();
acc.Add(3);
acc.Add(0);
acc.Add(std::numeric_limits<float>::quiet_NaN());
acc.Add(1);
acc.FinalizeFullGroup();
EXPECT_EQ(acc.GetResult(), 0);
EXPECT_EQ(acc.GetResult(), 2);
EXPECT_EQ(acc.GetResult(), 3);
EXPECT_EQ(acc.GetResult(), 1);
}
TEST(Accumulator, AggMedian) {
MedianAggregator<int> acc;
EXPECT_EQ(acc.GetResult(), std::nullopt);
acc.Reset();
acc.Add(7);
acc.Add(1);
acc.Add(1);
acc.Add(2);
EXPECT_EQ(acc.GetResult(), 1);
acc.Reset();
acc.Add(7);
acc.Add(1);
acc.Add(2);
EXPECT_EQ(acc.GetResult(), 2);
}
TEST(Accumulator, AggMedianNan) {
MedianAggregator<float> acc;
acc.Add(7);
acc.Add(1);
acc.Add(2);
acc.Add(std::numeric_limits<float>::quiet_NaN());
EXPECT_TRUE(std::isnan(acc.GetResult().value));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/aggregation.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/aggregation/aggregation_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
49087145-934b-4828-9aec-9b8a982968fa | cpp | google/tensorstore | parse_json_matches | tensorstore/internal/parse_json_matches.cc | tensorstore/internal/parse_json_matches_test.cc | #include "tensorstore/internal/parse_json_matches.h"
#include <ostream>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
namespace tensorstore {
namespace internal {
namespace {
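// Adapts a ::nlohmann::json matcher to strings: the string is parsed as JSON
// first and the parsed value is matched.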
class Matcher : public ::testing::MatcherInterface<std::string> {
public:
Matcher(::testing::Matcher<::nlohmann::json> json_matcher)
: json_matcher_(std::move(json_matcher)) {}
bool MatchAndExplain(
std::string value,
::testing::MatchResultListener* listener) const override {
return json_matcher_.MatchAndExplain(
tensorstore::internal::ParseJson(value), listener);
}
void DescribeTo(std::ostream* os) const override {
*os << "when parsed as JSON ";
json_matcher_.DescribeTo(os);
}
private:
::testing::Matcher<::nlohmann::json> json_matcher_;
};
}
::testing::Matcher<std::string> ParseJsonMatches(
::testing::Matcher<::nlohmann::json> json_matcher) {
return ::testing::MakeMatcher(new Matcher(std::move(json_matcher)));
}
::testing::Matcher<std::string> ParseJsonMatches(::nlohmann::json json) {
return ParseJsonMatches(MatchesJson(json));
}
}
} | #include "tensorstore/internal/parse_json_matches.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::internal::ParseJsonMatches;
TEST(ParseJsonMatchesTest, Describe) {
std::ostringstream ss;
ParseJsonMatches(::nlohmann::json(true)).DescribeTo(&ss);
EXPECT_EQ("when parsed as JSON matches json true", ss.str());
}
TEST(ParseJsonMatchesTest, Explain) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(ParseJsonMatches(::nlohmann::json(true)),
"false", &listener);
EXPECT_EQ(
"where the difference is:\n"
"[\n"
" {\n"
" \"op\": \"replace\",\n"
" \"path\": \"\",\n"
" \"value\": false\n"
" }\n"
"]",
listener.str());
}
TEST(ParseJsonMatchesTest, Matches) {
EXPECT_THAT("{\"a\":\"b\"}", ParseJsonMatches(::nlohmann::json{{"a", "b"}}));
EXPECT_THAT("{\"a\":\"b\"}",
::testing::Not(ParseJsonMatches(::nlohmann::json{{"a", "c"}})));
EXPECT_THAT("invalid",
::testing::Not(ParseJsonMatches(::nlohmann::json{{"a", "c"}})));
EXPECT_THAT("{\"a\":\"b\"}",
ParseJsonMatches(::testing::Not(::nlohmann::json{{"a", "c"}})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/parse_json_matches.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/parse_json_matches_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f3e68bf1-a58d-4abf-a35d-59676e9bf3a6 | cpp | abseil/abseil-cpp | sample_recorder | absl/profiling/internal/sample_recorder.h | absl/profiling/internal/sample_recorder_test.cc | #ifndef ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
#define ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
#include <atomic>
#include <cstddef>
#include <functional>
#include "absl/base/config.h"
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace profiling_internal {
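// Every sampled object embeds this node: `next` links the global list of all
// samples ever allocated, while `dead` links retired samples through the
// graveyard.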
template <typename T>
struct Sample {
absl::Mutex init_mu;
T* next = nullptr;
T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
int64_t weight;
};
template <typename T>
class SampleRecorder {
public:
SampleRecorder();
~SampleRecorder();
template <typename... Targs>
T* Register(Targs&&... args);
void Unregister(T* sample);
using DisposeCallback = void (*)(const T&);
DisposeCallback SetDisposeCallback(DisposeCallback f);
int64_t Iterate(const std::function<void(const T& stack)>& f);
size_t GetMaxSamples() const;
void SetMaxSamples(size_t max);
private:
void PushNew(T* sample);
void PushDead(T* sample);
template <typename... Targs>
T* PopDead(Targs... args);
std::atomic<size_t> dropped_samples_;
std::atomic<size_t> size_estimate_;
std::atomic<size_t> max_samples_{1 << 20};
std::atomic<T*> all_;
T graveyard_;
std::atomic<DisposeCallback> dispose_;
};
template <typename T>
typename SampleRecorder<T>::DisposeCallback
SampleRecorder<T>::SetDisposeCallback(DisposeCallback f) {
return dispose_.exchange(f, std::memory_order_relaxed);
}
template <typename T>
SampleRecorder<T>::SampleRecorder()
: dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
absl::MutexLock l(&graveyard_.init_mu);
graveyard_.dead = &graveyard_;
}
template <typename T>
SampleRecorder<T>::~SampleRecorder() {
T* s = all_.load(std::memory_order_acquire);
while (s != nullptr) {
T* next = s->next;
delete s;
s = next;
}
}
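// Lock-free prepend onto the global sample list.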
template <typename T>
void SampleRecorder<T>::PushNew(T* sample) {
sample->next = all_.load(std::memory_order_relaxed);
while (!all_.compare_exchange_weak(sample->next, sample,
std::memory_order_release,
std::memory_order_relaxed)) {
}
}
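// Samples are never freed while the recorder lives; retiring a sample chains
// it into the graveyard (a list headed and terminated by `graveyard_`) so
// that Iterate() can traverse `all_` without racing with deletion.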
template <typename T>
void SampleRecorder<T>::PushDead(T* sample) {
if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
dispose(*sample);
}
absl::MutexLock graveyard_lock(&graveyard_.init_mu);
absl::MutexLock sample_lock(&sample->init_mu);
sample->dead = graveyard_.dead;
graveyard_.dead = sample;
}
template <typename T>
template <typename... Targs>
T* SampleRecorder<T>::PopDead(Targs... args) {
absl::MutexLock graveyard_lock(&graveyard_.init_mu);
T* sample = graveyard_.dead;
if (sample == &graveyard_) return nullptr;
absl::MutexLock sample_lock(&sample->init_mu);
graveyard_.dead = sample->dead;
sample->dead = nullptr;
sample->PrepareForSampling(std::forward<Targs>(args)...);
return sample;
}
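// Prefers recycling a graveyard sample; above max_samples_, registration is
// refused and counted as a drop.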
template <typename T>
template <typename... Targs>
T* SampleRecorder<T>::Register(Targs&&... args) {
size_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
if (size > max_samples_.load(std::memory_order_relaxed)) {
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
dropped_samples_.fetch_add(1, std::memory_order_relaxed);
return nullptr;
}
T* sample = PopDead(args...);
if (sample == nullptr) {
sample = new T();
{
absl::MutexLock sample_lock(&sample->init_mu);
sample->init_mu.ForgetDeadlockInfo();
sample->PrepareForSampling(std::forward<Targs>(args)...);
}
PushNew(sample);
}
return sample;
}
template <typename T>
void SampleRecorder<T>::Unregister(T* sample) {
PushDead(sample);
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
}
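// Visits every live sample under its own lock, skipping graveyard entries
// (dead != nullptr); returns the number of dropped registrations.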
template <typename T>
int64_t SampleRecorder<T>::Iterate(
const std::function<void(const T& stack)>& f) {
T* s = all_.load(std::memory_order_acquire);
while (s != nullptr) {
absl::MutexLock l(&s->init_mu);
if (s->dead == nullptr) {
f(*s);
}
s = s->next;
}
return dropped_samples_.load(std::memory_order_relaxed);
}
template <typename T>
void SampleRecorder<T>::SetMaxSamples(size_t max) {
max_samples_.store(max, std::memory_order_release);
}
template <typename T>
size_t SampleRecorder<T>::GetMaxSamples() const {
return max_samples_.load(std::memory_order_acquire);
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/profiling/internal/sample_recorder.h"
#include <atomic>
#include <random>
#include <vector>
#include "gmock/gmock.h"
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace profiling_internal {
namespace {
using ::absl::synchronization_internal::ThreadPool;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
struct Info : public Sample<Info> {
public:
void PrepareForSampling(int64_t w) { weight = w; }
std::atomic<size_t> size;
absl::Time create_time;
};
std::vector<size_t> GetSizes(SampleRecorder<Info>* s) {
std::vector<size_t> res;
s->Iterate([&](const Info& info) {
res.push_back(info.size.load(std::memory_order_acquire));
});
return res;
}
std::vector<int64_t> GetWeights(SampleRecorder<Info>* s) {
std::vector<int64_t> res;
s->Iterate([&](const Info& info) { res.push_back(info.weight); });
return res;
}
Info* Register(SampleRecorder<Info>* s, int64_t weight, size_t size) {
auto* info = s->Register(weight);
assert(info != nullptr);
info->size.store(size);
return info;
}
TEST(SampleRecorderTest, Registration) {
SampleRecorder<Info> sampler;
auto* info1 = Register(&sampler, 31, 1);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31));
auto* info2 = Register(&sampler, 32, 2);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
info1->size.store(3);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31, 32));
sampler.Unregister(info1);
sampler.Unregister(info2);
}
TEST(SampleRecorderTest, Unregistration) {
SampleRecorder<Info> sampler;
std::vector<Info*> infos;
for (size_t i = 0; i < 3; ++i) {
infos.push_back(Register(&sampler, 33 + i, i));
}
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 34, 35));
sampler.Unregister(infos[1]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35));
infos.push_back(Register(&sampler, 36, 3));
infos.push_back(Register(&sampler, 37, 4));
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 36, 37));
sampler.Unregister(infos[3]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 37));
sampler.Unregister(infos[0]);
sampler.Unregister(infos[2]);
sampler.Unregister(infos[4]);
EXPECT_THAT(GetSizes(&sampler), IsEmpty());
}
TEST(SampleRecorderTest, MultiThreaded) {
SampleRecorder<Info> sampler;
Notification stop;
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
pool.Schedule([&sampler, &stop, i]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<Info*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
infoz.push_back(sampler.Register(i));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
infoz.push_back(sampler.Register(i));
break;
}
case 1: {
size_t p =
std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
Info* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
EXPECT_EQ(info->weight, i);
sampler.Unregister(info);
break;
}
case 2: {
absl::Duration oldest = absl::ZeroDuration();
sampler.Iterate([&](const Info& info) {
oldest = std::max(oldest, absl::Now() - info.create_time);
});
ASSERT_GE(oldest, absl::ZeroDuration());
break;
}
}
}
});
}
absl::SleepFor(absl::Seconds(3));
stop.Notify();
}
TEST(SampleRecorderTest, Callback) {
SampleRecorder<Info> sampler;
auto* info1 = Register(&sampler, 39, 1);
auto* info2 = Register(&sampler, 40, 2);
static const Info* expected;
auto callback = [](const Info& info) {
EXPECT_EQ(&info, expected);
};
EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
expected = info1;
sampler.Unregister(info1);
EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
expected = nullptr;
sampler.Unregister(info2);
}
}  // namespace
}  // namespace profiling_internal
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/profiling/internal/sample_recorder.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/profiling/internal/sample_recorder_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
2349c720-6615-482b-b85d-847d3c18aa94 | cpp | google/cel-cpp | set_util | eval/public/set_util.cc | eval/public/set_util_test.cc | #include "eval/public/set_util.h"
#include <algorithm>
#include <vector>
namespace google::api::expr::runtime {
namespace {
template <typename T>
int ComparisonImpl(T lhs, T rhs) {
if (lhs < rhs) {
return -1;
} else if (lhs > rhs) {
return 1;
} else {
return 0;
}
}
template <>
int ComparisonImpl(const CelError* lhs, const CelError* rhs) {
if (*lhs == *rhs) {
return 0;
}
return lhs < rhs ? -1 : 1;
}
template <>
int ComparisonImpl(CelValue::MessageWrapper lhs_wrapper,
CelValue::MessageWrapper rhs_wrapper) {
auto* lhs = lhs_wrapper.message_ptr();
auto* rhs = rhs_wrapper.message_ptr();
if (lhs < rhs) {
return -1;
} else if (lhs > rhs) {
return 1;
} else {
return 0;
}
}
template <>
int ComparisonImpl(const CelList* lhs, const CelList* rhs) {
int size_comparison = ComparisonImpl(lhs->size(), rhs->size());
if (size_comparison != 0) {
return size_comparison;
}
google::protobuf::Arena arena;
for (int i = 0; i < lhs->size(); i++) {
CelValue lhs_i = lhs->Get(&arena, i);
CelValue rhs_i = rhs->Get(&arena, i);
int value_comparison = CelValueCompare(lhs_i, rhs_i);
if (value_comparison != 0) {
return value_comparison;
}
}
return 0;
}
template <>
int ComparisonImpl(const CelMap* lhs, const CelMap* rhs) {
int size_comparison = ComparisonImpl(lhs->size(), rhs->size());
if (size_comparison != 0) {
return size_comparison;
}
google::protobuf::Arena arena;
std::vector<CelValue> lhs_keys;
std::vector<CelValue> rhs_keys;
lhs_keys.reserve(lhs->size());
rhs_keys.reserve(lhs->size());
const CelList* lhs_key_view = lhs->ListKeys(&arena).value();
const CelList* rhs_key_view = rhs->ListKeys(&arena).value();
for (int i = 0; i < lhs->size(); i++) {
lhs_keys.push_back(lhs_key_view->Get(&arena, i));
rhs_keys.push_back(rhs_key_view->Get(&arena, i));
}
std::sort(lhs_keys.begin(), lhs_keys.end(), &CelValueLessThan);
std::sort(rhs_keys.begin(), rhs_keys.end(), &CelValueLessThan);
for (size_t i = 0; i < lhs_keys.size(); i++) {
auto lhs_key_i = lhs_keys[i];
auto rhs_key_i = rhs_keys[i];
int key_comparison = CelValueCompare(lhs_key_i, rhs_key_i);
if (key_comparison != 0) {
return key_comparison;
}
auto lhs_value_i = lhs->Get(&arena, lhs_key_i).value();
auto rhs_value_i = rhs->Get(&arena, rhs_key_i).value();
int value_comparison = CelValueCompare(lhs_value_i, rhs_value_i);
if (value_comparison != 0) {
return value_comparison;
}
}
return 0;
}
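// Maps are ordered by size first, then by their sorted key sequences, and
// finally by the values fetched for each key. Sorting both key vectors with
// CelValueLessThan makes the comparison independent of iteration order, so
// two maps with the same entries compare equal.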
struct ComparisonVisitor {
explicit ComparisonVisitor(CelValue rhs) : rhs(rhs) {}
template <typename T>
int operator()(T lhs_value) {
T rhs_value;
if (!rhs.GetValue(&rhs_value)) {
return ComparisonImpl(CelValue::Type(CelValue::IndexOf<T>::value),
rhs.type());
}
return ComparisonImpl(lhs_value, rhs_value);
}
CelValue rhs;
};
}  // namespace
int CelValueCompare(CelValue lhs, CelValue rhs) {
return lhs.InternalVisit<int>(ComparisonVisitor(rhs));
}
bool CelValueLessThan(CelValue lhs, CelValue rhs) {
return lhs.InternalVisit<int>(ComparisonVisitor(rhs)) < 0;
}
bool CelValueEqual(CelValue lhs, CelValue rhs) {
return lhs.InternalVisit<int>(ComparisonVisitor(rhs)) == 0;
}
bool CelValueGreaterThan(CelValue lhs, CelValue rhs) {
return lhs.InternalVisit<int>(ComparisonVisitor(rhs)) > 0;
}
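// Example (illustrative): CelValueLessThan is a strict weak ordering across
// all CelValue kinds, so it can back ordered containers directly.
//
//   std::set<CelValue, CelValueLessThanComparator> values(&CelValueLessThan);
//   values.insert(CelValue::CreateInt64(1));
//   values.insert(CelValue::CreateBool(true));  // different type, kept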
} | #include "eval/public/set_util.h"
#include <cstddef>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "google/protobuf/empty.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/message.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/unknown_set.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using google::protobuf::Arena;
using protobuf::Empty;
using protobuf::ListValue;
using protobuf::Struct;
constexpr char kExampleText[] = "abc";
constexpr char kExampleText2[] = "abd";
std::string* ExampleStr() {
static std::string* example = new std::string(kExampleText);
return example;
}
std::string* ExampleStr2() {
static std::string* example = new std::string(kExampleText2);
return example;
}
std::vector<CelValue> TypeExamples(Arena* arena) {
Empty* empty = Arena::Create<Empty>(arena);
Struct* proto_map = Arena::Create<Struct>(arena);
ListValue* proto_list = Arena::Create<ListValue>(arena);
UnknownSet* unknown_set = Arena::Create<UnknownSet>(arena);
return {CelValue::CreateBool(false),
CelValue::CreateInt64(0),
CelValue::CreateUint64(0),
CelValue::CreateDouble(0.0),
CelValue::CreateStringView(kExampleText),
CelValue::CreateBytes(ExampleStr()),
CelProtoWrapper::CreateMessage(empty, arena),
CelValue::CreateDuration(absl::ZeroDuration()),
CelValue::CreateTimestamp(absl::Now()),
CelProtoWrapper::CreateMessage(proto_list, arena),
CelProtoWrapper::CreateMessage(proto_map, arena),
CelValue::CreateUnknownSet(unknown_set),
CreateErrorValue(arena, "test", absl::StatusCode::kInternal)};
}
class TypeOrderingTest : public testing::TestWithParam<std::tuple<int, int>> {
public:
TypeOrderingTest() {
i_ = std::get<0>(GetParam());
j_ = std::get<1>(GetParam());
}
protected:
int i_;
int j_;
Arena arena_;
};
TEST_P(TypeOrderingTest, TypeLessThan) {
auto examples = TypeExamples(&arena_);
CelValue lhs = examples[i_];
CelValue rhs = examples[j_];
EXPECT_EQ(CelValueLessThan(lhs, rhs), i_ < j_);
EXPECT_EQ(CelValueEqual(lhs, rhs), i_ == j_);
}
std::string TypeOrderingTestName(
testing::TestParamInfo<std::tuple<int, int>> param) {
int i = std::get<0>(param.param);
int j = std::get<1>(param.param);
return absl::StrCat(CelValue::TypeName(CelValue::Type(i)), "_",
CelValue::TypeName(CelValue::Type(j)));
}
INSTANTIATE_TEST_SUITE_P(TypePairs, TypeOrderingTest,
testing::Combine(testing::Range(0, 13),
testing::Range(0, 13)),
&TypeOrderingTestName);
TEST(CelValueLessThanComparator, StdSetSupport) {
Arena arena;
auto examples = TypeExamples(&arena);
std::set<CelValue, CelValueLessThanComparator> value_set(&CelValueLessThan);
for (CelValue value : examples) {
auto insert = value_set.insert(value);
bool was_inserted = insert.second;
EXPECT_TRUE(was_inserted)
<< absl::StrCat("Insertion failed ", CelValue::TypeName(value.type()));
}
for (CelValue value : examples) {
auto insert = value_set.insert(value);
bool was_inserted = insert.second;
EXPECT_FALSE(was_inserted) << absl::StrCat(
"Re-insertion succeeded ", CelValue::TypeName(value.type()));
}
}
enum class ExpectedCmp { kEq, kLt, kGt };
struct PrimitiveCmpTestCase {
CelValue lhs;
CelValue rhs;
ExpectedCmp expected;
};
class PrimitiveCmpTest : public testing::TestWithParam<PrimitiveCmpTestCase> {
public:
PrimitiveCmpTest() {
lhs_ = GetParam().lhs;
rhs_ = GetParam().rhs;
expected_ = GetParam().expected;
}
protected:
CelValue lhs_;
CelValue rhs_;
ExpectedCmp expected_;
};
TEST_P(PrimitiveCmpTest, Basic) {
switch (expected_) {
case ExpectedCmp::kLt:
EXPECT_TRUE(CelValueLessThan(lhs_, rhs_));
break;
case ExpectedCmp::kGt:
EXPECT_TRUE(CelValueGreaterThan(lhs_, rhs_));
break;
case ExpectedCmp::kEq:
EXPECT_TRUE(CelValueEqual(lhs_, rhs_));
break;
}
}
std::string PrimitiveCmpTestName(
testing::TestParamInfo<PrimitiveCmpTestCase> info) {
absl::string_view cmp_name;
switch (info.param.expected) {
case ExpectedCmp::kEq:
cmp_name = "Eq";
break;
case ExpectedCmp::kLt:
cmp_name = "Lt";
break;
case ExpectedCmp::kGt:
cmp_name = "Gt";
break;
}
return absl::StrCat(CelValue::TypeName(info.param.lhs.type()), "_", cmp_name);
}
INSTANTIATE_TEST_SUITE_P(
Pairs, PrimitiveCmpTest,
testing::ValuesIn(std::vector<PrimitiveCmpTestCase>{
{CelValue::CreateStringView(kExampleText),
CelValue::CreateStringView(kExampleText), ExpectedCmp::kEq},
{CelValue::CreateStringView(kExampleText),
CelValue::CreateStringView(kExampleText2), ExpectedCmp::kLt},
{CelValue::CreateStringView(kExampleText2),
CelValue::CreateStringView(kExampleText), ExpectedCmp::kGt},
{CelValue::CreateBytes(ExampleStr()),
CelValue::CreateBytes(ExampleStr()), ExpectedCmp::kEq},
{CelValue::CreateBytes(ExampleStr()),
CelValue::CreateBytes(ExampleStr2()), ExpectedCmp::kLt},
{CelValue::CreateBytes(ExampleStr2()),
CelValue::CreateBytes(ExampleStr()), ExpectedCmp::kGt},
{CelValue::CreateBool(false), CelValue::CreateBool(false),
ExpectedCmp::kEq},
{CelValue::CreateBool(false), CelValue::CreateBool(true),
ExpectedCmp::kLt},
{CelValue::CreateBool(true), CelValue::CreateBool(false),
ExpectedCmp::kGt},
{CelValue::CreateInt64(1), CelValue::CreateInt64(1), ExpectedCmp::kEq},
{CelValue::CreateInt64(1), CelValue::CreateInt64(2), ExpectedCmp::kLt},
{CelValue::CreateInt64(2), CelValue::CreateInt64(1), ExpectedCmp::kGt},
{CelValue::CreateUint64(1), CelValue::CreateUint64(1),
ExpectedCmp::kEq},
{CelValue::CreateUint64(1), CelValue::CreateUint64(2),
ExpectedCmp::kLt},
{CelValue::CreateUint64(2), CelValue::CreateUint64(1),
ExpectedCmp::kGt},
{CelValue::CreateDuration(absl::Minutes(1)),
CelValue::CreateDuration(absl::Minutes(1)), ExpectedCmp::kEq},
{CelValue::CreateDuration(absl::Minutes(1)),
CelValue::CreateDuration(absl::Minutes(2)), ExpectedCmp::kLt},
{CelValue::CreateDuration(absl::Minutes(2)),
CelValue::CreateDuration(absl::Minutes(1)), ExpectedCmp::kGt},
{CelValue::CreateTimestamp(absl::FromUnixSeconds(1)),
CelValue::CreateTimestamp(absl::FromUnixSeconds(1)), ExpectedCmp::kEq},
{CelValue::CreateTimestamp(absl::FromUnixSeconds(1)),
CelValue::CreateTimestamp(absl::FromUnixSeconds(2)), ExpectedCmp::kLt},
{CelValue::CreateTimestamp(absl::FromUnixSeconds(2)),
CelValue::CreateTimestamp(absl::FromUnixSeconds(1)),
ExpectedCmp::kGt}}),
&PrimitiveCmpTestName);
TEST(CelValueLessThan, PtrCmpMessage) {
Arena arena;
CelValue lhs =
CelProtoWrapper::CreateMessage(Arena::Create<Empty>(&arena), &arena);
CelValue rhs =
CelProtoWrapper::CreateMessage(Arena::Create<Empty>(&arena), &arena);
if (lhs.MessageOrDie() > rhs.MessageOrDie()) {
std::swap(lhs, rhs);
}
EXPECT_TRUE(CelValueLessThan(lhs, rhs));
EXPECT_FALSE(CelValueLessThan(rhs, lhs));
EXPECT_FALSE(CelValueLessThan(lhs, lhs));
}
TEST(CelValueLessThan, PtrCmpUnknownSet) {
Arena arena;
CelValue lhs = CelValue::CreateUnknownSet(Arena::Create<UnknownSet>(&arena));
CelValue rhs = CelValue::CreateUnknownSet(Arena::Create<UnknownSet>(&arena));
if (lhs.UnknownSetOrDie() > rhs.UnknownSetOrDie()) {
std::swap(lhs, rhs);
}
EXPECT_TRUE(CelValueLessThan(lhs, rhs));
EXPECT_FALSE(CelValueLessThan(rhs, lhs));
EXPECT_FALSE(CelValueLessThan(lhs, lhs));
}
TEST(CelValueLessThan, PtrCmpError) {
Arena arena;
CelValue lhs = CreateErrorValue(&arena, "test1", absl::StatusCode::kInternal);
CelValue rhs = CreateErrorValue(&arena, "test2", absl::StatusCode::kInternal);
if (lhs.ErrorOrDie() > rhs.ErrorOrDie()) {
std::swap(lhs, rhs);
}
EXPECT_TRUE(CelValueLessThan(lhs, rhs));
EXPECT_FALSE(CelValueLessThan(rhs, lhs));
EXPECT_FALSE(CelValueLessThan(lhs, lhs));
}
TEST(CelValueLessThan, CelListSameSize) {
ContainerBackedListImpl cel_list_1(std::vector<CelValue>{
CelValue::CreateInt64(1), CelValue::CreateInt64(2)});
ContainerBackedListImpl cel_list_2(std::vector<CelValue>{
CelValue::CreateInt64(1), CelValue::CreateInt64(3)});
EXPECT_TRUE(CelValueLessThan(CelValue::CreateList(&cel_list_1),
CelValue::CreateList(&cel_list_2)));
}
TEST(CelValueLessThan, CelListDifferentSizes) {
ContainerBackedListImpl cel_list_1(
std::vector<CelValue>{CelValue::CreateInt64(2)});
ContainerBackedListImpl cel_list_2(std::vector<CelValue>{
CelValue::CreateInt64(1), CelValue::CreateInt64(3)});
EXPECT_TRUE(CelValueLessThan(CelValue::CreateList(&cel_list_1),
CelValue::CreateList(&cel_list_2)));
}
TEST(CelValueLessThan, CelListEqual) {
ContainerBackedListImpl cel_list_1(std::vector<CelValue>{
CelValue::CreateInt64(1), CelValue::CreateInt64(2)});
ContainerBackedListImpl cel_list_2(std::vector<CelValue>{
CelValue::CreateInt64(1), CelValue::CreateInt64(2)});
EXPECT_FALSE(CelValueLessThan(CelValue::CreateList(&cel_list_1),
CelValue::CreateList(&cel_list_2)));
EXPECT_TRUE(CelValueEqual(CelValue::CreateList(&cel_list_2),
CelValue::CreateList(&cel_list_1)));
}
TEST(CelValueLessThan, CelListSupportProtoListCompatible) {
Arena arena;
ListValue list_value;
list_value.add_values()->set_bool_value(true);
list_value.add_values()->set_number_value(1.0);
list_value.add_values()->set_string_value("abc");
CelValue proto_list = CelProtoWrapper::CreateMessage(&list_value, &arena);
ASSERT_TRUE(proto_list.IsList());
std::vector<CelValue> list_values{CelValue::CreateBool(true),
CelValue::CreateDouble(1.0),
CelValue::CreateStringView("abd")};
ContainerBackedListImpl list_backing(list_values);
CelValue cel_list = CelValue::CreateList(&list_backing);
EXPECT_TRUE(CelValueLessThan(proto_list, cel_list));
}
TEST(CelValueLessThan, CelMapSameSize) {
std::vector<std::pair<CelValue, CelValue>> values{
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(3), CelValue::CreateInt64(6)}};
auto cel_map_backing_1 =
CreateContainerBackedMap(absl::MakeSpan(values)).value();
std::vector<std::pair<CelValue, CelValue>> values2{
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(4), CelValue::CreateInt64(6)}};
auto cel_map_backing_2 =
CreateContainerBackedMap(absl::MakeSpan(values2)).value();
std::vector<std::pair<CelValue, CelValue>> values3{
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(3), CelValue::CreateInt64(8)}};
auto cel_map_backing_3 =
CreateContainerBackedMap(absl::MakeSpan(values3)).value();
CelValue map1 = CelValue::CreateMap(cel_map_backing_1.get());
CelValue map2 = CelValue::CreateMap(cel_map_backing_2.get());
CelValue map3 = CelValue::CreateMap(cel_map_backing_3.get());
EXPECT_TRUE(CelValueLessThan(map1, map2));
EXPECT_TRUE(CelValueLessThan(map1, map3));
EXPECT_TRUE(CelValueLessThan(map3, map2));
}
TEST(CelValueLessThan, CelMapDifferentSizes) {
std::vector<std::pair<CelValue, CelValue>> values{
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(2), CelValue::CreateInt64(4)}};
auto cel_map_1 = CreateContainerBackedMap(absl::MakeSpan(values)).value();
std::vector<std::pair<CelValue, CelValue>> values2{
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(2), CelValue::CreateInt64(4)},
{CelValue::CreateInt64(3), CelValue::CreateInt64(6)}};
auto cel_map_2 = CreateContainerBackedMap(absl::MakeSpan(values2)).value();
EXPECT_TRUE(CelValueLessThan(CelValue::CreateMap(cel_map_1.get()),
CelValue::CreateMap(cel_map_2.get())));
}
TEST(CelValueLessThan, CelMapEqual) {
std::vector<std::pair<CelValue, CelValue>> values{
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(2), CelValue::CreateInt64(4)},
{CelValue::CreateInt64(3), CelValue::CreateInt64(6)}};
auto cel_map_1 = CreateContainerBackedMap(absl::MakeSpan(values)).value();
std::vector<std::pair<CelValue, CelValue>> values2{
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(2), CelValue::CreateInt64(4)},
{CelValue::CreateInt64(3), CelValue::CreateInt64(6)}};
auto cel_map_2 = CreateContainerBackedMap(absl::MakeSpan(values2)).value();
EXPECT_FALSE(CelValueLessThan(CelValue::CreateMap(cel_map_1.get()),
CelValue::CreateMap(cel_map_2.get())));
EXPECT_TRUE(CelValueEqual(CelValue::CreateMap(cel_map_2.get()),
CelValue::CreateMap(cel_map_1.get())));
}
TEST(CelValueLessThan, CelMapSupportProtoMapCompatible) {
Arena arena;
const std::vector<std::string> kFields = {"field1", "field2", "field3"};
Struct value_struct;
auto& value1 = (*value_struct.mutable_fields())[kFields[0]];
value1.set_bool_value(true);
auto& value2 = (*value_struct.mutable_fields())[kFields[1]];
value2.set_number_value(1.0);
auto& value3 = (*value_struct.mutable_fields())[kFields[2]];
value3.set_string_value("test");
CelValue proto_struct = CelProtoWrapper::CreateMessage(&value_struct, &arena);
ASSERT_TRUE(proto_struct.IsMap());
std::vector<std::pair<CelValue, CelValue>> values{
{CelValue::CreateStringView(kFields[2]),
CelValue::CreateStringView("test")},
{CelValue::CreateStringView(kFields[1]), CelValue::CreateDouble(1.0)},
{CelValue::CreateStringView(kFields[0]), CelValue::CreateBool(true)}};
auto backing_map = CreateContainerBackedMap(absl::MakeSpan(values)).value();
CelValue cel_map = CelValue::CreateMap(backing_map.get());
EXPECT_TRUE(!CelValueLessThan(cel_map, proto_struct) &&
!CelValueGreaterThan(cel_map, proto_struct));
}
TEST(CelValueLessThan, NestedMap) {
Arena arena;
ListValue list_value;
list_value.add_values()->set_bool_value(true);
list_value.add_values()->set_number_value(1.0);
list_value.add_values()->set_string_value("test");
std::vector<CelValue> list_values{CelValue::CreateBool(true),
CelValue::CreateDouble(1.0),
CelValue::CreateStringView("test")};
ContainerBackedListImpl list_backing(list_values);
CelValue cel_list = CelValue::CreateList(&list_backing);
Struct value_struct;
*(value_struct.mutable_fields()->operator[]("field").mutable_list_value()) =
list_value;
std::vector<std::pair<CelValue, CelValue>> values{
{CelValue::CreateStringView("field"), cel_list}};
auto backing_map = CreateContainerBackedMap(absl::MakeSpan(values)).value();
CelValue cel_map = CelValue::CreateMap(backing_map.get());
CelValue proto_map = CelProtoWrapper::CreateMessage(&value_struct, &arena);
EXPECT_TRUE(!CelValueLessThan(cel_map, proto_map) &&
!CelValueLessThan(proto_map, cel_map));
}
}  // namespace
}  // namespace runtime
}  // namespace expr
}  // namespace api
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/set_util.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/set_util_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
10bb58ac-bb24-4434-b6f2-4ba070e2f308 | cpp | tensorflow/tensorflow | reduce_scatter_reassociate | third_party/xla/xla/service/reduce_scatter_reassociate.cc | third_party/xla/xla/service/reduce_scatter_reassociate_test.cc | #include "xla/service/reduce_scatter_reassociate.h"
#include <optional>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
bool AreCompatible(const HloReduceScatterInstruction *rs0,
const HloReduceScatterInstruction *rs1,
ReductionKind op_kind) {
std::optional<AllReduceKey> key0 = GetAllReduceKey(rs0);
std::optional<AllReduceKey> key1 = GetAllReduceKey(rs1);
auto kind0 = MatchReductionComputation(rs0->to_apply());
auto dims_match = rs0->scatter_dimension() == rs1->scatter_dimension();
return key0 && key1 && kind0 && *key0 == *key1 && kind0 == op_kind &&
dims_match;
}
}  // namespace
absl::StatusOr<bool> ReduceScatterReassociate::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
if (hlo_query::ContainsLayoutConstrainedCollective(
*module, HloOpcode::kReduceScatter)) {
VLOG(1)
<< "Skip ReduceScatterReassociate because the module contains reduce-"
"scatter with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
std::optional<ReductionKind> kind = MatchReductionInstruction(inst);
if (!kind || inst->operand(0)->opcode() != HloOpcode::kReduceScatter ||
inst->operand(1)->opcode() != HloOpcode::kReduceScatter ||
!inst->shape().IsArray()) {
continue;
}
auto *rs0 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(0));
auto *rs1 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(1));
if (!AreCompatible(rs0, rs1, *kind)) {
VLOG(2) << "Reduce-Scatter operations are not compatible, skipping";
continue;
}
if (rs0->user_count() != 1 || rs1->user_count() != 1) {
VLOG(2) << "Reduce-Scatter operations have > 1 users";
continue;
}
HloInstruction *new_op =
computation->AddInstruction(inst->CloneWithNewOperands(
rs0->mutable_operand(0)->shape(),
{rs0->mutable_operand(0), rs1->mutable_operand(0)}));
HloInstruction *new_rs = computation->AddInstruction(
rs0->CloneWithNewOperands(inst->shape(), {new_op}));
if (new_rs->channel_id()) {
new_rs->set_channel_id(next_channel_id++);
}
TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_rs));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs0));
if (rs0 != rs1) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs1));
}
changed = true;
}
}
return changed;
}
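// In HLO terms, the rewrite above turns (illustrative):
//
//   rs0 = reduce-scatter(p0), dimensions={0}, to_apply=sum
//   rs1 = reduce-scatter(p1), dimensions={0}, to_apply=sum
//   out = add(rs0, rs1)
//
// into
//
//   sum01 = add(p0, p1)
//   out = reduce-scatter(sum01), dimensions={0}, to_apply=sum
//
// replacing two collectives with one at the cost of one full-size
// elementwise op.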
} | #include "xla/service/reduce_scatter_reassociate.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
class ReduceScatterReassociateTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
auto changed = ReduceScatterReassociate().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t ReduceScatterCount(std::unique_ptr<HloModule>& module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kReduceScatter>);
}
};
TEST_F(ReduceScatterReassociateTest, Simple) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::ReduceScatter(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(ReduceScatterReassociateTest, SimpleWithConstrainLayout) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, constrain_layout=true, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, constrain_layout=true, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, SimpleChain) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum
rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum
add0 = f32[4] add(rs0, rs1)
add1 = f32[4] add(add0, rs2)
ROOT add2 = f32[4] add(add1, rs3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::ReduceScatter(m::Add(
m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),
m::Parameter(3))));
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(ReduceScatterReassociateTest, SimpleTree) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum
rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum
add0 = f32[4] add(rs0, rs1)
add1 = f32[4] add(rs2, rs3)
ROOT add2 = f32[4] add(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::ReduceScatter(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Add(m::Parameter(2), m::Parameter(3)))));
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(ReduceScatterReassociateTest, MismatchOp0) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, MismatchOp1) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=max
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, MismatchDimension) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
rs0 = f32[8,8] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[8,8] reduce-scatter(p1), dimensions={1}, to_apply=sum
ROOT add = f32[8,8] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, MismatchReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0}}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={}, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, MismatchHasChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, channel_id=3, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, MismatchUseGlobalDeviceId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1}}, channel_id=3, use_global_device_ids=true, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={{0,1}}, channel_id=4, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, NotSingleUser) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
add = f32[4] add(rs0, rs1)
ROOT t = (f32[4], f32[4]) tuple(rs0, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterReassociateTest, DoubleUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
add = f32[4] add(rs0, rs0)
ROOT c = f32[4] copy(add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c3ea12b-9590-4f05-917c-0adaca0d8b2d | cpp | google/quiche | recording_http2_visitor | quiche/http2/adapter/recording_http2_visitor.cc | quiche/http2/adapter/recording_http2_visitor_test.cc | #include "quiche/http2/adapter/recording_http2_visitor.h"
#include "absl/strings/str_format.h"
#include "quiche/http2/adapter/http2_protocol.h"
#include "quiche/http2/adapter/http2_util.h"
namespace http2 {
namespace adapter {
namespace test {
int64_t RecordingHttp2Visitor::OnReadyToSend(absl::string_view serialized) {
events_.push_back(absl::StrFormat("OnReadyToSend %d", serialized.size()));
return serialized.size();
}
Http2VisitorInterface::DataFrameHeaderInfo
RecordingHttp2Visitor::OnReadyToSendDataForStream(Http2StreamId stream_id,
size_t max_length) {
events_.push_back(absl::StrFormat("OnReadyToSendDataForStream %d %d",
stream_id, max_length));
  return {/*payload_length=*/70000, /*end_data=*/true, /*end_stream=*/true};
}
bool RecordingHttp2Visitor::SendDataFrame(Http2StreamId stream_id,
                                          absl::string_view /*frame_header*/,
size_t payload_bytes) {
events_.push_back(
absl::StrFormat("SendDataFrame %d %d", stream_id, payload_bytes));
return true;
}
void RecordingHttp2Visitor::OnConnectionError(ConnectionError error) {
events_.push_back(
absl::StrFormat("OnConnectionError %s", ConnectionErrorToString(error)));
}
bool RecordingHttp2Visitor::OnFrameHeader(Http2StreamId stream_id,
size_t length, uint8_t type,
uint8_t flags) {
events_.push_back(absl::StrFormat("OnFrameHeader %d %d %d %d", stream_id,
length, type, flags));
return true;
}
void RecordingHttp2Visitor::OnSettingsStart() {
events_.push_back("OnSettingsStart");
}
void RecordingHttp2Visitor::OnSetting(Http2Setting setting) {
events_.push_back(absl::StrFormat(
"OnSetting %s %d", Http2SettingsIdToString(setting.id), setting.value));
}
void RecordingHttp2Visitor::OnSettingsEnd() {
events_.push_back("OnSettingsEnd");
}
void RecordingHttp2Visitor::OnSettingsAck() {
events_.push_back("OnSettingsAck");
}
bool RecordingHttp2Visitor::OnBeginHeadersForStream(Http2StreamId stream_id) {
events_.push_back(absl::StrFormat("OnBeginHeadersForStream %d", stream_id));
return true;
}
Http2VisitorInterface::OnHeaderResult RecordingHttp2Visitor::OnHeaderForStream(
Http2StreamId stream_id, absl::string_view name, absl::string_view value) {
events_.push_back(
absl::StrFormat("OnHeaderForStream %d %s %s", stream_id, name, value));
return HEADER_OK;
}
bool RecordingHttp2Visitor::OnEndHeadersForStream(Http2StreamId stream_id) {
events_.push_back(absl::StrFormat("OnEndHeadersForStream %d", stream_id));
return true;
}
bool RecordingHttp2Visitor::OnDataPaddingLength(Http2StreamId stream_id,
size_t padding_length) {
events_.push_back(
absl::StrFormat("OnDataPaddingLength %d %d", stream_id, padding_length));
return true;
}
bool RecordingHttp2Visitor::OnBeginDataForStream(Http2StreamId stream_id,
size_t payload_length) {
events_.push_back(
absl::StrFormat("OnBeginDataForStream %d %d", stream_id, payload_length));
return true;
}
bool RecordingHttp2Visitor::OnDataForStream(Http2StreamId stream_id,
absl::string_view data) {
events_.push_back(absl::StrFormat("OnDataForStream %d %s", stream_id, data));
return true;
}
bool RecordingHttp2Visitor::OnEndStream(Http2StreamId stream_id) {
events_.push_back(absl::StrFormat("OnEndStream %d", stream_id));
return true;
}
void RecordingHttp2Visitor::OnRstStream(Http2StreamId stream_id,
Http2ErrorCode error_code) {
events_.push_back(absl::StrFormat("OnRstStream %d %s", stream_id,
Http2ErrorCodeToString(error_code)));
}
bool RecordingHttp2Visitor::OnCloseStream(Http2StreamId stream_id,
Http2ErrorCode error_code) {
events_.push_back(absl::StrFormat("OnCloseStream %d %s", stream_id,
Http2ErrorCodeToString(error_code)));
return true;
}
void RecordingHttp2Visitor::OnPriorityForStream(Http2StreamId stream_id,
Http2StreamId parent_stream_id,
int weight, bool exclusive) {
events_.push_back(absl::StrFormat("OnPriorityForStream %d %d %d %d",
stream_id, parent_stream_id, weight,
exclusive));
}
void RecordingHttp2Visitor::OnPing(Http2PingId ping_id, bool is_ack) {
events_.push_back(absl::StrFormat("OnPing %d %d", ping_id, is_ack));
}
void RecordingHttp2Visitor::OnPushPromiseForStream(
Http2StreamId stream_id, Http2StreamId promised_stream_id) {
events_.push_back(absl::StrFormat("OnPushPromiseForStream %d %d", stream_id,
promised_stream_id));
}
bool RecordingHttp2Visitor::OnGoAway(Http2StreamId last_accepted_stream_id,
Http2ErrorCode error_code,
absl::string_view opaque_data) {
events_.push_back(
absl::StrFormat("OnGoAway %d %s %s", last_accepted_stream_id,
Http2ErrorCodeToString(error_code), opaque_data));
return true;
}
void RecordingHttp2Visitor::OnWindowUpdate(Http2StreamId stream_id,
int window_increment) {
events_.push_back(
absl::StrFormat("OnWindowUpdate %d %d", stream_id, window_increment));
}
int RecordingHttp2Visitor::OnBeforeFrameSent(uint8_t frame_type,
Http2StreamId stream_id,
size_t length, uint8_t flags) {
events_.push_back(absl::StrFormat("OnBeforeFrameSent %d %d %d %d", frame_type,
stream_id, length, flags));
return 0;
}
int RecordingHttp2Visitor::OnFrameSent(uint8_t frame_type,
Http2StreamId stream_id, size_t length,
uint8_t flags, uint32_t error_code) {
events_.push_back(absl::StrFormat("OnFrameSent %d %d %d %d %d", frame_type,
stream_id, length, flags, error_code));
return 0;
}
bool RecordingHttp2Visitor::OnInvalidFrame(Http2StreamId stream_id,
InvalidFrameError error) {
events_.push_back(absl::StrFormat("OnInvalidFrame %d %s", stream_id,
InvalidFrameErrorToString(error)));
return true;
}
void RecordingHttp2Visitor::OnBeginMetadataForStream(Http2StreamId stream_id,
size_t payload_length) {
events_.push_back(absl::StrFormat("OnBeginMetadataForStream %d %d", stream_id,
payload_length));
}
bool RecordingHttp2Visitor::OnMetadataForStream(Http2StreamId stream_id,
absl::string_view metadata) {
events_.push_back(
absl::StrFormat("OnMetadataForStream %d %s", stream_id, metadata));
return true;
}
bool RecordingHttp2Visitor::OnMetadataEndForStream(Http2StreamId stream_id) {
events_.push_back(absl::StrFormat("OnMetadataEndForStream %d", stream_id));
return true;
}
std::pair<int64_t, bool> RecordingHttp2Visitor::PackMetadataForStream(
    Http2StreamId stream_id, uint8_t* /*dest*/, size_t /*dest_len*/) {
events_.push_back(absl::StrFormat("PackMetadataForStream %d", stream_id));
return {1, true};
}
void RecordingHttp2Visitor::OnErrorDebug(absl::string_view message) {
events_.push_back(absl::StrFormat("OnErrorDebug %s", message));
}
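// Example (illustrative): two visitors driven by the same callbacks produce
// identical event logs, which is how tests assert behavioral equivalence.
//
//   RecordingHttp2Visitor a;
//   RecordingHttp2Visitor b;
//   a.OnSettingsStart();
//   b.OnSettingsStart();
//   // a.GetEventSequence() == b.GetEventSequence()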
}  // namespace test
}  // namespace adapter
} | #include "quiche/http2/adapter/recording_http2_visitor.h"
#include <list>
#include <string>
#include "quiche/http2/adapter/http2_protocol.h"
#include "quiche/http2/adapter/http2_visitor_interface.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
namespace {
using ::testing::IsEmpty;
TEST(RecordingHttp2VisitorTest, EmptySequence) {
RecordingHttp2Visitor chocolate_visitor;
RecordingHttp2Visitor vanilla_visitor;
EXPECT_THAT(chocolate_visitor.GetEventSequence(), IsEmpty());
EXPECT_THAT(vanilla_visitor.GetEventSequence(), IsEmpty());
EXPECT_EQ(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
chocolate_visitor.OnSettingsStart();
EXPECT_THAT(chocolate_visitor.GetEventSequence(), testing::Not(IsEmpty()));
EXPECT_THAT(vanilla_visitor.GetEventSequence(), IsEmpty());
EXPECT_NE(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
chocolate_visitor.Clear();
EXPECT_THAT(chocolate_visitor.GetEventSequence(), IsEmpty());
EXPECT_THAT(vanilla_visitor.GetEventSequence(), IsEmpty());
EXPECT_EQ(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
}
TEST(RecordingHttp2VisitorTest, SameEventsProduceSameSequence) {
RecordingHttp2Visitor chocolate_visitor;
RecordingHttp2Visitor vanilla_visitor;
http2::test::Http2Random random;
const Http2StreamId stream_id = random.Uniform(kMaxStreamId);
const Http2StreamId another_stream_id = random.Uniform(kMaxStreamId);
const size_t length = random.Rand16();
const uint8_t type = random.Rand8();
const uint8_t flags = random.Rand8();
const Http2ErrorCode error_code = static_cast<Http2ErrorCode>(
random.Uniform(static_cast<int>(Http2ErrorCode::MAX_ERROR_CODE)));
const Http2Setting setting = {random.Rand16(), random.Rand32()};
const absl::string_view alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-";
const std::string some_string =
random.RandStringWithAlphabet(random.Rand8(), alphabet);
const std::string another_string =
random.RandStringWithAlphabet(random.Rand8(), alphabet);
const uint16_t some_int = random.Rand16();
const bool some_bool = random.OneIn(2);
std::list<RecordingHttp2Visitor*> visitors = {&chocolate_visitor,
&vanilla_visitor};
for (RecordingHttp2Visitor* visitor : visitors) {
visitor->OnConnectionError(
Http2VisitorInterface::ConnectionError::kSendError);
visitor->OnFrameHeader(stream_id, length, type, flags);
visitor->OnSettingsStart();
visitor->OnSetting(setting);
visitor->OnSettingsEnd();
visitor->OnSettingsAck();
visitor->OnBeginHeadersForStream(stream_id);
visitor->OnHeaderForStream(stream_id, some_string, another_string);
visitor->OnEndHeadersForStream(stream_id);
visitor->OnBeginDataForStream(stream_id, length);
visitor->OnDataForStream(stream_id, some_string);
visitor->OnDataForStream(stream_id, another_string);
visitor->OnEndStream(stream_id);
visitor->OnRstStream(stream_id, error_code);
visitor->OnCloseStream(stream_id, error_code);
visitor->OnPriorityForStream(stream_id, another_stream_id, some_int,
some_bool);
visitor->OnPing(some_int, some_bool);
visitor->OnPushPromiseForStream(stream_id, another_stream_id);
visitor->OnGoAway(stream_id, error_code, some_string);
visitor->OnWindowUpdate(stream_id, some_int);
visitor->OnBeginMetadataForStream(stream_id, length);
visitor->OnMetadataForStream(stream_id, some_string);
visitor->OnMetadataForStream(stream_id, another_string);
visitor->OnMetadataEndForStream(stream_id);
}
EXPECT_EQ(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
}
TEST(RecordingHttp2VisitorTest, DifferentEventsProduceDifferentSequence) {
RecordingHttp2Visitor chocolate_visitor;
RecordingHttp2Visitor vanilla_visitor;
EXPECT_EQ(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
const Http2StreamId stream_id = 1;
const size_t length = 42;
chocolate_visitor.OnBeginDataForStream(stream_id, length);
vanilla_visitor.OnBeginMetadataForStream(stream_id, length);
EXPECT_NE(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
chocolate_visitor.Clear();
vanilla_visitor.Clear();
EXPECT_EQ(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
chocolate_visitor.OnBeginHeadersForStream(stream_id);
vanilla_visitor.OnBeginHeadersForStream(stream_id + 2);
EXPECT_NE(chocolate_visitor.GetEventSequence(),
vanilla_visitor.GetEventSequence());
}
}  // namespace
}  // namespace test
}  // namespace adapter
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/recording_http2_visitor.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/recording_http2_visitor_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ff66b5ca-2709-4292-b7be-cb7d378338e4 | cpp | tensorflow/tensorflow | command_buffer_cmd | third_party/xla/xla/service/gpu/runtime/command_buffer_cmd.cc | third_party/xla/xla/service/gpu/runtime/command_buffer_cmd_test.cc | #include "xla/service/gpu/runtime/command_buffer_cmd.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/debug_options_flags.h"
#include "xla/executable_run_options.h"
#include "xla/ffi/call_frame.h"
#include "xla/ffi/ffi_api.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/runtime/annotation.h"
#include "xla/service/gpu/runtime/nccl_all_gather_thunk.h"
#include "xla/service/gpu/runtime/nccl_all_reduce_thunk.h"
#include "xla/service/gpu/runtime/nccl_all_to_all_thunk.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/gpu/runtime/nccl_collective_broadcast_thunk.h"
#include "xla/service/gpu/runtime/nccl_collective_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/lazy_op_runner.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/trace_command_buffer_factory.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_status_internal.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#endif
namespace xla::gpu {
namespace {
std::optional<se::DeviceMemoryBase> AssignBufferIfNotNull(
const BufferAllocations& buffer_allocations,
BufferAllocation::Slice& slice) {
return slice.allocation() != nullptr
? std::optional<se::DeviceMemoryBase>{buffer_allocations
.GetDeviceAddress(slice)}
: std::nullopt;
}
}  // namespace
using ExecutionScopeId = se::CommandBuffer::ExecutionScopeId;
using MemoryAccess = CommandBufferCmd::MemoryAccess;
std::string CommandBufferCmdString(CommandBufferCmdType type) {
switch (type) {
#define CASE_CMD_STRING(enum_name, cmd_name, ...) \
case CommandBufferCmdType::enum_name: \
return cmd_name;
COMMAND_BUFFER_CMD_LIST(CASE_CMD_STRING)
#undef CASE_CMD_STRING
default:
return "UnknownCmd";
}
}
static std::string_view ReductionKindString(ReductionKind kind) {
switch (kind) {
case ReductionKind::MAX:
return "max";
case ReductionKind::MIN:
return "min";
case ReductionKind::PRODUCT:
return "product";
case ReductionKind::SUM:
return "sum";
}
}
static se::CommandBuffer::Builder CreateBuilder(
CommandBufferCmdSequence* commands,
const Thunk::ExecuteParams* execute_params,
const CommandBufferCmd::RecordParams* record_params) {
return [=](se::CommandBuffer* command_buffer) {
return commands->Record(*execute_params, *record_params, command_buffer,
CommandBufferCmdSequence::RecordMode::kConditional);
};
}
static std::vector<se::CommandBuffer::Builder> CreateBuilders(
absl::Span<CommandBufferCmdSequence> commands,
const Thunk::ExecuteParams* execute_params,
const CommandBufferCmd::RecordParams* record_params) {
std::vector<se::CommandBuffer::Builder> builders;
for (CommandBufferCmdSequence& cmd : commands) {
builders.push_back(CreateBuilder(&cmd, execute_params, record_params));
}
return builders;
}
static se::CommandBuffer::ExecutionScopeBuilder CreateExecutionScopeBuilder(
CommandBufferCmdSequence* commands,
const Thunk::ExecuteParams* execute_params,
const CommandBufferCmd::RecordParams* record_params) {
return [=](ExecutionScopeId id, se::CommandBuffer* command_buffer) {
CommandBufferCmd::RecordParams params = *record_params;
params.execution_scope_id = id;
return commands->Record(*execute_params, params, command_buffer,
CommandBufferCmdSequence::RecordMode::kConditional);
};
}
CommandBufferCmd::State* CommandBufferCmd::StateManager::GetOrNull(
const CommandBufferCmd* cmd) {
if (auto it = state_.find(cmd); it != state_.end()) {
return it->second.get();
}
return nullptr;
}
CommandBufferCmd::State* CommandBufferCmd::StateManager::GetOrCreate(
const CommandBufferCmd* cmd,
absl::FunctionRef<std::unique_ptr<State>()> create) {
if (auto it = state_.find(cmd); it != state_.end()) {
return it->second.get();
}
return state_.try_emplace(cmd, create()).first->second.get();
}
se::CommandBuffer::ExecutionScopeId CommandBufferCmd::GetExecutionScope(
const RecordParams& record_params,
ExecutionStreamId execution_stream_id) const {
uint64_t base = record_params.execution_scope_id.value();
uint64_t offset = execution_stream_id.value();
return se::CommandBuffer::ExecutionScopeId(base + offset);
}
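// Execution scopes are laid out by simple offsetting: a command recorded for
// execution stream S under base scope B lands in scope (B + S). For example,
// base scope 0 and execution stream 2 record into scope 2.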
se::CommandBuffer::ExecutionScopeId CommandBufferCmd::GetExecutionScope(
const RecordParams& record_params) const {
return GetExecutionScope(record_params, execution_stream_id_);
}
CommandBufferCmdSequence::CommandBufferCmdSequence(
SynchronizationMode synchronization_mode)
: synchronization_mode_(synchronization_mode) {}
void CommandBufferCmdSequence::Append(std::unique_ptr<CommandBufferCmd> cmd) {
for (const CommandBufferCmd::BufferUsage& buffer : cmd->buffers()) {
buffers_.insert(buffer);
allocs_indices_.insert(buffer.slice.index());
}
ExecutionStreamId execution_stream_id = cmd->execution_stream_id();
CommandBufferCmd::BufferUsageVector buffers = cmd->buffers();
bool requires_barrier = HasConflicts(execution_stream_id, buffers);
if (synchronization_mode_ == SynchronizationMode::kSerialize &&
!commands_.empty()) {
requires_barrier = true;
}
if (commands_.size() == 1 && commands_.front().cmd->IsNestedCommandBuffer()) {
requires_barrier = true;
}
if (requires_barrier) ClearTrackedBuffers(execution_stream_id);
commands_.push_back({std::move(cmd), requires_barrier});
TrackBuffers(execution_stream_id, buffers);
}
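// Barrier placement sketch (illustrative): a barrier is requested when the
// new command's buffer uses conflict with those tracked since the last
// barrier on its execution stream; kSerialize mode and a leading nested
// command buffer force one unconditionally.
//
//   write(a); write(b);  // disjoint slices: no barrier
//   read(a);             // read-after-write on `a`: barrier before this cmd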
absl::Status CommandBufferCmdSequence::Prepare(
const Thunk::PrepareParams& params,
Thunk::ResourceRequests& resource_requests) {
for (auto& command : commands_) {
TF_RETURN_IF_ERROR(command.cmd->Prepare(params, resource_requests));
}
return absl::OkStatus();
}
absl::Status CommandBufferCmdSequence::Initialize(
const Thunk::InitializeParams& params,
CommandBufferCmd::StateManager& state) {
for (auto& command : commands_) {
TF_RETURN_IF_ERROR(command.cmd->Initialize(params, state));
}
return absl::OkStatus();
}
bool CommandBufferCmdSequence::HasConflicts(
ExecutionStreamId execution_stream_id,
const CommandBufferCmd::BufferUsageVector& buffers) {
auto& rwset = read_write_sets_[execution_stream_id];
auto read_overlap = [&](const BufferAllocation::Slice& slice) {
if (rwset.read.contains(slice)) return true;
for (auto& read : rwset.read)
if (read.OverlapsWith(slice)) return true;
return false;
};
auto write_overlap = [&](const BufferAllocation::Slice& slice) {
if (rwset.write.contains(slice)) return true;
for (auto& write : rwset.write)
if (write.OverlapsWith(slice)) return true;
return false;
};
return absl::c_any_of(buffers, [&](const auto& buffer) {
return buffer.access == MemoryAccess::kWrite
? write_overlap(buffer.slice) || read_overlap(buffer.slice)
: write_overlap(buffer.slice);
});
}
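// HasConflicts implements the classic hazard rules over buffer slices: a
// write conflicts with any overlapping prior read or write (WAR/WAW), while
// a read conflicts only with an overlapping prior write (RAW); read-read
// pairs never force a barrier.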
void CommandBufferCmdSequence::TrackBuffers(
ExecutionStreamId execution_stream_id,
const CommandBufferCmd::BufferUsageVector& buffers) {
auto& rwset = read_write_sets_[execution_stream_id];
for (const CommandBufferCmd::BufferUsage& buffer : buffers) {
if (buffer.access == MemoryAccess::kWrite) rwset.write.insert(buffer.slice);
if (buffer.access == MemoryAccess::kRead) rwset.read.insert(buffer.slice);
}
}
void CommandBufferCmdSequence::ClearTrackedBuffers(
ExecutionStreamId execution_stream_id) {
read_write_sets_[execution_stream_id] = ReadWriteSet();
}
static std::string_view RecordModeString(
CommandBufferCmdSequence::RecordMode mode) {
switch (mode) {
case CommandBufferCmdSequence::RecordMode::kExclusive:
return "exclusive";
case CommandBufferCmdSequence::RecordMode::kConditional:
return "conditional";
}
}
absl::Status CommandBufferCmdSequence::Record(
const Thunk::ExecuteParams& execute_params,
const CommandBufferCmd::RecordParams& record_params,
se::CommandBuffer* command_buffer, RecordMode mode) {
VLOG(3) << "Record " << commands_.size() << " commands into command buffer"
<< "; mode=" << RecordModeString(mode);
uint64_t start_micros = tsl::Env::Default()->NowMicros();
if (mode == RecordMode::kExclusive) {
if (command_buffer->state() == se::CommandBuffer::State::kFinalized) {
TF_RETURN_IF_ERROR(command_buffer->Update());
}
}
absl::flat_hash_map<ExecutionScopeId, int64_t> num_recorded_commands;
for (CommandInfo& command : commands_) {
if (execute_params.mock_collectives &&
dynamic_cast<CollectiveCmd*>(command.cmd.get())) {
continue;
}
ExecutionScopeId execution_scope_id =
command.cmd->GetExecutionScope(record_params);
std::optional<tsl::profiler::ScopedAnnotation> annotation =
GetKernelAnnotation(command.cmd->profile_annotation());
if (command.requires_barrier) {
VLOG(3) << "Add command buffer barrier after "
<< num_recorded_commands[execution_scope_id]
<< " recorded commands into the execution scope #"
<< execution_scope_id.value();
TF_RETURN_IF_ERROR(command_buffer->Barrier(execution_scope_id));
num_recorded_commands.erase(execution_scope_id);
}
VLOG(5) << "Record command buffer with scope id "
<< execution_scope_id.value();
TF_RETURN_IF_ERROR(
command.cmd->Record(execute_params, record_params, command_buffer));
++num_recorded_commands[execution_scope_id];
}
if (mode == RecordMode::kExclusive) {
TF_RETURN_IF_ERROR(command_buffer->Finalize());
}
uint64_t end_micros = tsl::Env::Default()->NowMicros();
VLOG(3) << "Recorded " << commands_.size()
<< " commands into command buffer in " << (end_micros - start_micros)
<< " μs; mode=" << RecordModeString(mode);
return absl::OkStatus();
}
const absl::flat_hash_set<CommandBufferCmd::BufferUsage>&
CommandBufferCmdSequence::buffers() const {
return buffers_;
}
const absl::flat_hash_set<BufferAllocation::Index>&
CommandBufferCmdSequence::allocs_indices() const {
return allocs_indices_;
}
std::vector<bool> CommandBufferCmdSequence::barriers() const {
std::vector<bool> barriers;
absl::c_transform(commands_, std::back_inserter(barriers),
[](auto& command) { return command.requires_barrier; });
return barriers;
}
TracedCommandBuffer::TracedCommandBuffer(
const CommandBufferCmd* trace_cmd,
CommandBufferCmd::BufferUsageVector buffers, int64_t capacity)
: trace_cmd_(trace_cmd), capacity_(capacity), entries_(capacity) {
CHECK_GT(capacity, 0) << "capacity must be larger than 0";
absl::flat_hash_set<BufferAllocation::Index> allocs_indices;
for (auto& buffer : buffers) allocs_indices.insert(buffer.slice.index());
allocs_indices_.assign(allocs_indices.begin(), allocs_indices.end());
}
absl::StatusOr<se::CommandBuffer*> TracedCommandBuffer::GetOrTraceCommandBuffer(
const BufferAllocations* buffer_allocation, se::StreamExecutor* executor,
se::Stream* stream, absl::FunctionRef<absl::Status(se::Stream*)> trace) {
absl::InlinedVector<se::DeviceMemoryBase, 4> allocs;
allocs.reserve(allocs_indices_.size());
for (auto& index : allocs_indices_) {
allocs.emplace_back(buffer_allocation->GetDeviceAddress(index));
}
auto shift_right = [&](size_t i) -> Entry& {
if (i == 0) return entries_[0];
Entry entry = std::move(entries_[i]);
do {
entries_[i] = std::move(entries_[i - 1]);
} while (--i > 0);
return entries_[0] = std::move(entry);
};
for (size_t i = 0; i < capacity_; ++i) {
if (ABSL_PREDICT_TRUE(absl::c_equal(entries_[i].recorded_allocs, allocs) &&
entries_[i].command_buffer)) {
VLOG(6) << "Command buffer trace cache hit for command "
<< trace_cmd_->ToString();
return shift_right(i).command_buffer.get();
}
if (entries_[i].command_buffer == nullptr) {
TF_ASSIGN_OR_RETURN(
entries_[i].command_buffer,
se::TraceCommandBufferFactory::Create(executor, stream, trace));
entries_[i].recorded_allocs.assign(allocs.begin(), allocs.end());
VLOG(6) << "Command buffer trace cache create new item for command "
<< trace_cmd_->ToString();
return shift_right(i).command_buffer.get();
}
}
TF_ASSIGN_OR_RETURN(
entries_[capacity_ - 1].command_buffer,
se::TraceCommandBufferFactory::Create(executor, stream, trace));
entries_[capacity_ - 1].recorded_allocs.assign(allocs.begin(), allocs.end());
VLOG(6) << "Command buffer trace cache does replacement for command "
<< trace_cmd_->ToString();
return shift_right(capacity_ - 1).command_buffer.get();
}
TracedCommandBufferCmd::TracedCommandBufferCmd(
CommandBufferCmdType cmd_type, ExecutionStreamId execution_stream_id)
: CommandBufferCmd(cmd_type, execution_stream_id) {}
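// Traces `trace` into a cached command buffer (see TracedCommandBuffer above)
// and records it as a nested command buffer in this command's execution
// scope. The per-command cache is created lazily in the record state, with
// capacity taken from the xla_cmd_buffer_trace_cache_size debug option.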
absl::Status TracedCommandBufferCmd::AddTracedCommandBuffer(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer,
absl::FunctionRef<absl::Status(se::Stream*)> trace) {
auto traced_cmd =
record_params.state.GetOrCreate<TracedCommandBuffer>(this, [&] {
const auto& debug_options = xla::GetDebugOptionsFromFlags();
return std::make_unique<TracedCommandBuffer>(
this, buffers(), debug_options.xla_cmd_buffer_trace_cache_size());
});
TF_ASSIGN_OR_RETURN(
auto nested_cmd,
traced_cmd->GetOrTraceCommandBuffer(
execute_params.buffer_allocations, execute_params.stream->parent(),
execute_params.command_buffer_trace_stream, trace));
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "Add nested command buffer to execution scope: "
<< execution_scope_id.value();
return command_buffer->AddNestedCommandBuffer(execution_scope_id,
*nested_cmd);
}
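// PTX for a tiny 32-bit memset kernel: each thread writes the value in
// param_1 to its element of the buffer in param_2, bounds-checked against the
// element count in param_0. On CUDA builds ComputationIdCmd launches this
// kernel instead of recording a memset node (see ComputationIdCmd::Record
// below).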
inline constexpr std::string_view kMemset32Kernel = R"(
.version 4.0
.target sm_50
.address_size 64
.visible .entry memset32(
.param .u64 memset32_param_0,
.param .u32 memset32_param_1,
.param .u64 memset32_param_2
)
{
.reg .pred %p<2>;
.reg .b32 %r<6>;
.reg .b64 %rd<7>;
.loc 1 3 0
ld.param.u64 %rd3, [memset32_param_0];
ld.param.u32 %r1, [memset32_param_1];
ld.param.u64 %rd2, [memset32_param_2];
.loc 1 5 3
mov.u32 %r2, %ctaid.x;
mov.u32 %r3, %ntid.x;
mov.u32 %r4, %tid.x;
mad.lo.s32 %r5, %r2, %r3, %r4;
.loc 1 6 3
cvt.s64.s32 %rd1, %r5;
setp.ge.s64 %p1, %rd1, %rd3;
@%p1 bra $L__BB0_2;
.loc 1 5 3
cvta.to.global.u64 %rd4, %rd2;
.loc 1 6 3
shl.b64 %rd5, %rd1, 2;
add.s64 %rd6, %rd4, %rd5;
st.global.u32 [%rd6], %r1;
$L__BB0_2:
.loc 1 7 1
ret;
})";
ComputationIdCmd::ComputationIdCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice dest, Kind kind)
: CommandBufferCmd(CommandBufferCmdType::kComputationIdCmd,
execution_stream_id),
dest_(dest),
kind_(kind) {}
CommandBufferCmd::BufferUsageVector ComputationIdCmd::buffers() {
return {{dest_, MemoryAccess::kWrite}};
}
absl::Status ComputationIdCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
#if defined(GOOGLE_CUDA)
{
absl::MutexLock lock(&mutex_);
if (memset_kernels_.contains(params.executor)) return absl::OkStatus();
}
  TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
                      CreateKernel("memset32", 3, kMemset32Kernel, {},
                                   params.executor, 0));
absl::MutexLock lock(&mutex_);
memset_kernels_.emplace(params.executor, std::move(kernel));
#endif
return absl::OkStatus();
}
absl::Status ComputationIdCmd::Record(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase dst =
execute_params.buffer_allocations->GetDeviceAddress(dest_);
GlobalDeviceId global_device_id =
execute_params.collective_params->global_device_id;
TF_ASSIGN_OR_RETURN(
const DeviceAssignment::LogicalID logical_id,
execute_params.collective_params->device_assn->LogicalIdForDevice(
global_device_id));
uint32_t value = kind_ == Kind::kReplica ? logical_id.replica_id
: logical_id.computation_id;
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "ComputationIdCmd"
<< ": kind=" << (kind_ == Kind::kReplica ? "replica" : "partition")
<< "; value=" << value
<< "; execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " Id: " << dest_ << " (" << dst.opaque() << ")";
#if defined(GOOGLE_CUDA)
se::Kernel* memset_kernel = [&] {
absl::MutexLock lock(&mutex_);
return memset_kernels_[execute_params.stream->parent()].get();
}();
if (memset_kernel == nullptr) {
return absl::InternalError(
"Memset kernel not loaded on a command buffer executor");
}
auto args = se::PackKernelArgs(0, int64_t{1}, value, dst);
return command_buffer->Launch(execution_scope_id, se::ThreadDim(1),
se::BlockDim(1), *memset_kernel, *args);
#else
  return command_buffer->Memset(execution_scope_id, &dst, value, 1);
#endif
}
LaunchCmd::LaunchCmd(ExecutionStreamId execution_stream_id,
std::string kernel_name,
absl::Span<const BufferAllocation::Slice> args,
absl::Span<const MemoryAccess> args_access,
LaunchDimensions dims, int64_t shmem_bytes)
: CommandBufferCmd(CommandBufferCmdType::kLaunchCmd, execution_stream_id),
kernel_name_(std::move(kernel_name)),
args_(args.begin(), args.end()),
args_access_(args_access.begin(), args_access.end()),
dims_(dims),
shmem_bytes_(shmem_bytes) {}
absl::Status LaunchCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
{
absl::MutexLock lock(&mutex_);
if (kernels_.contains(params.executor)) return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::Kernel> kernel,
CreateKernel(kernel_name_, args_.size(), params.src.text,
params.src.binary, params.executor, shmem_bytes_));
absl::MutexLock lock(&mutex_);
kernels_.emplace(params.executor, std::move(kernel));
return absl::OkStatus();
}
absl::Status LaunchCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "LaunchCmd: kernel=" << kernel_name_
<< "; shmem_bytes=" << shmem_bytes_
<< "; execution_scope_id=" << execution_scope_id.value();
se::Kernel* kernel = [&] {
absl::MutexLock lock(&mutex_);
return kernels_[execute_params.stream->parent()].get();
}();
if (kernel == nullptr) {
return absl::InternalError(absl::StrCat(
"Kernel not loaded on a command buffer executor: ", kernel_name_));
}
absl::InlinedVector<se::DeviceMemoryBase, 4> buffers;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf =
execute_params.buffer_allocations->GetDeviceAddress(arg);
VLOG(5) << " Arg: " << arg << ": " << buf.opaque();
buffers.push_back(buf);
}
TF_ASSIGN_OR_RETURN(auto kernel_args,
se::PackKernelArgs(buffers, shmem_bytes_));
return command_buffer->Launch(execution_scope_id,
dims_.thread_counts_per_block(),
dims_.block_counts(), *kernel, *kernel_args);
}
CommandBufferCmd::BufferUsageVector LaunchCmd::buffers() {
BufferUsageVector buffers;
for (int32_t i = 0; i < args_.size(); ++i) {
buffers.emplace_back(args_[i], args_access_[i]);
}
return buffers;
}
CustomKernelLaunchCmd::CustomKernelLaunchCmd(
ExecutionStreamId execution_stream_id,
absl::Span<const BufferAllocation::Slice> args,
absl::Span<const MemoryAccess> args_access, CustomKernel custom_kernel)
: CommandBufferCmd(CommandBufferCmdType::kCustomKernelLaunchCmd,
execution_stream_id),
args_(args.begin(), args.end()),
args_access_(args_access.begin(), args_access.end()),
custom_kernel_(std::move(custom_kernel)) {}
absl::Status CustomKernelLaunchCmd::Initialize(
const Thunk::InitializeParams& params, StateManager& state) {
{
absl::MutexLock lock(&mutex_);
if (kernels_.contains(params.executor)) return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::Kernel> kernel,
params.executor->LoadKernel(custom_kernel_.kernel_spec()));
absl::MutexLock lock(&mutex_);
kernels_.emplace(params.executor, std::move(kernel));
return absl::OkStatus();
}
absl::Status CustomKernelLaunchCmd::Record(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer) {
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "CustomKernelLaunchCmd: custom_kernel=" << custom_kernel_.name()
<< "; execution_scope_id=" << execution_scope_id.value();
se::Kernel* kernel = [&] {
absl::MutexLock lock(&mutex_);
return kernels_[execute_params.stream->parent()].get();
}();
if (kernel == nullptr) {
return absl::InternalError(
absl::StrCat("Custom kernel not loaded on a command buffer executor: ",
custom_kernel_.name()));
}
absl::InlinedVector<se::DeviceMemoryBase, 4> buffers;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf =
execute_params.buffer_allocations->GetDeviceAddress(arg);
VLOG(5) << " Arg: " << arg << ": " << buf.opaque();
buffers.push_back(buf);
}
se::KernelArgsDeviceMemoryArray kernel_args(
buffers, custom_kernel_.shared_memory_bytes());
return command_buffer->Launch(
execution_scope_id, custom_kernel_.thread_dims(),
custom_kernel_.block_dims(), *kernel, kernel_args);
}
CommandBufferCmd::BufferUsageVector CustomKernelLaunchCmd::buffers() {
BufferUsageVector buffers;
for (int32_t i = 0; i < args_.size(); ++i) {
buffers.emplace_back(args_[i], args_access_[i]);
}
return buffers;
}
MemcpyDeviceToDeviceCmd::MemcpyDeviceToDeviceCmd(
ExecutionStreamId execution_stream_id, BufferAllocation::Slice dst,
BufferAllocation::Slice src, int64_t num_bytes)
: CommandBufferCmd(CommandBufferCmdType::kMemcpyDeviceToDeviceCmd,
execution_stream_id),
dst_(dst),
src_(src),
num_bytes_(num_bytes) {}
absl::Status MemcpyDeviceToDeviceCmd::Record(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase dst =
execute_params.buffer_allocations->GetDeviceAddress(dst_);
se::DeviceMemoryBase src =
execute_params.buffer_allocations->GetDeviceAddress(src_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "MemcpyDeviceToDeviceCmd: num_bytes = " << num_bytes_
<< "; execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " Dst: " << dst_ << " (" << dst.opaque() << ")";
VLOG(5) << " Src: " << src_ << " (" << src.opaque() << ")";
if (num_bytes_ == 0) {
VLOG(5) << "Skip recording MemcpyDeviceToDeviceCmd command of 0 bytes";
return absl::OkStatus();
}
return command_buffer->MemcpyDeviceToDevice(execution_scope_id, &dst, src,
num_bytes_);
}
CommandBufferCmd::BufferUsageVector MemcpyDeviceToDeviceCmd::buffers() {
return {{dst_, MemoryAccess::kWrite}, {src_, MemoryAccess::kRead}};
}
MemzeroCmd::MemzeroCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice dst)
: CommandBufferCmd(CommandBufferCmdType::kMemzeroCmd, execution_stream_id),
dst_(dst) {}
absl::Status MemzeroCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase dst =
execute_params.buffer_allocations->GetDeviceAddress(dst_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "MemzeroCmd: execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " Dst: " << dst_ << " (" << dst.opaque() << ")";
if (dst_.size() == 0) {
VLOG(5) << "Skip recording MemzeroCmd command of 0 bytes";
return absl::OkStatus();
}
return command_buffer->Memset(execution_scope_id, &dst, uint8_t{0},
dst_.size());
}
CommandBufferCmd::BufferUsageVector MemzeroCmd::buffers() {
return {{dst_, MemoryAccess::kWrite}};
}
Memset32Cmd::Memset32Cmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice dst, uint32_t bit_pattern)
: CommandBufferCmd(CommandBufferCmdType::kMemset32Cmd, execution_stream_id),
dst_(dst),
bit_pattern_(bit_pattern) {}
absl::Status Memset32Cmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase dst =
execute_params.buffer_allocations->GetDeviceAddress(dst_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "Memset32Cmd: bit_pattern=" << bit_pattern_
<< "; execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " Dst: " << dst_ << " (" << dst.opaque() << ")";
if (dst_.size() == 0) {
VLOG(5) << "Skip recording Memset32Cmd command of 0 bytes";
return absl::OkStatus();
}
  return command_buffer->Memset(execution_scope_id, &dst, bit_pattern_,
                                dst_.size() / sizeof(uint32_t));
}
CommandBufferCmd::BufferUsageVector Memset32Cmd::buffers() {
return {{dst_, MemoryAccess::kWrite}};
}
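// Control-flow commands (If/IfElse/Case/For/While) record their nested
// command sequences through builder callbacks (CreateBuilder /
// CreateBuilders / CreateExecutionScopeBuilder) and propagate force_update()
// from those sequences so a parent command buffer knows when it must be
// re-recorded.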
IfCmd::IfCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice pred,
CommandBufferCmdSequence then_commands)
: CommandBufferCmd(CommandBufferCmdType::kIfCmd, execution_stream_id),
pred_(pred),
then_commands_(std::move(then_commands)) {}
absl::Status IfCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
return then_commands_.Initialize(params, state);
}
absl::Status IfCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase pred =
execute_params.buffer_allocations->GetDeviceAddress(pred_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "IfCmd: execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " pred: " << pred_ << " (" << pred.opaque() << ")";
return command_buffer->If(
execution_scope_id, se::DeviceMemory<bool>(pred),
CreateBuilder(&then_commands_, &execute_params, &record_params));
}
bool IfCmd::force_update() { return then_commands_.force_update(); }
CommandBufferCmd::BufferUsageVector IfCmd::buffers() {
absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers;
buffers.emplace(pred_, MemoryAccess::kRead);
buffers.insert(then_commands_.buffers().begin(),
then_commands_.buffers().end());
return {buffers.begin(), buffers.end()};
}
IfElseCmd::IfElseCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice pred,
CommandBufferCmdSequence then_commands,
CommandBufferCmdSequence else_commands)
: CommandBufferCmd(CommandBufferCmdType::kIfElseCmd, execution_stream_id),
pred_(pred),
then_commands_(std::move(then_commands)),
else_commands_(std::move(else_commands)) {}
absl::Status IfElseCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
TF_RETURN_IF_ERROR(then_commands_.Initialize(params, state));
TF_RETURN_IF_ERROR(else_commands_.Initialize(params, state));
return absl::OkStatus();
}
absl::Status IfElseCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase pred =
execute_params.buffer_allocations->GetDeviceAddress(pred_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "IfElseCmd: execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " pred: " << pred_ << " (" << pred.opaque() << ")";
return command_buffer->IfElse(
execution_scope_id, se::DeviceMemory<bool>(pred),
CreateBuilder(&then_commands_, &execute_params, &record_params),
CreateBuilder(&else_commands_, &execute_params, &record_params));
}
bool IfElseCmd::force_update() {
return (then_commands_.force_update() || else_commands_.force_update());
}
CommandBufferCmd::BufferUsageVector IfElseCmd::buffers() {
absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers;
buffers.emplace(pred_, MemoryAccess::kRead);
buffers.insert(then_commands_.buffers().begin(),
then_commands_.buffers().end());
buffers.insert(else_commands_.buffers().begin(),
else_commands_.buffers().end());
return {buffers.begin(), buffers.end()};
}
CaseCmd::CaseCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice index,
std::vector<CommandBufferCmdSequence> branches_commands)
: CommandBufferCmd(CommandBufferCmdType::kCaseCmd, execution_stream_id),
index_(index),
branches_commands_(std::move(branches_commands)) {}
absl::Status CaseCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
for (auto& branch : branches_commands_) {
TF_RETURN_IF_ERROR(branch.Initialize(params, state));
}
return absl::OkStatus();
}
absl::Status CaseCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase index =
execute_params.buffer_allocations->GetDeviceAddress(index_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "CaseCmd: execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " index: " << index_ << " (" << index.opaque() << ")";
return command_buffer->Case(execution_scope_id,
se::DeviceMemory<int32_t>(index),
CreateBuilders(absl::MakeSpan(branches_commands_),
&execute_params, &record_params));
}
bool CaseCmd::force_update() {
return absl::c_any_of(branches_commands_,
[](const auto& seq) { return seq.force_update(); });
}
CommandBufferCmd::BufferUsageVector CaseCmd::buffers() {
absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers;
buffers.emplace(index_, MemoryAccess::kRead);
for (auto& branch : branches_commands_) {
buffers.insert(branch.buffers().begin(), branch.buffers().end());
}
return {buffers.begin(), buffers.end()};
}
ForCmd::ForCmd(ExecutionStreamId execution_stream_id, int32_t num_iterations,
BufferAllocation::Slice loop_counter,
CommandBufferCmdSequence body_commands)
: CommandBufferCmd(CommandBufferCmdType::kForCmd, execution_stream_id),
num_iterations_(num_iterations),
loop_counter_(loop_counter),
body_commands_(std::move(body_commands)) {}
absl::Status ForCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
return body_commands_.Initialize(params, state);
}
absl::Status ForCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase loop_counter =
execute_params.buffer_allocations->GetDeviceAddress(loop_counter_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "ForCmd: num_iterations=" << num_iterations_
<< "; body_commands=" << body_commands_.size()
<< "; execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " loop_counter: " << loop_counter_ << " ("
<< loop_counter.opaque() << ")";
return command_buffer->For(
execution_scope_id, num_iterations_,
se::DeviceMemory<int32_t>(loop_counter),
CreateBuilder(&body_commands_, &execute_params, &record_params));
}
bool ForCmd::force_update() { return body_commands_.force_update(); }
CommandBufferCmd::BufferUsageVector ForCmd::buffers() {
absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers;
buffers.emplace(loop_counter_, MemoryAccess::kWrite);
buffers.insert(body_commands_.buffers().begin(),
body_commands_.buffers().end());
return {buffers.begin(), buffers.end()};
}
WhileCmd::WhileCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice pred,
CommandBufferCmdSequence cond_commands,
CommandBufferCmdSequence body_commands)
: CommandBufferCmd(CommandBufferCmdType::kWhileCmd, execution_stream_id),
pred_(pred),
cond_commands_(std::move(cond_commands)),
body_commands_(std::move(body_commands)) {}
absl::Status WhileCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
TF_RETURN_IF_ERROR(cond_commands_.Initialize(params, state));
return body_commands_.Initialize(params, state);
}
absl::Status WhileCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase pred =
execute_params.buffer_allocations->GetDeviceAddress(pred_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "WhileCmd: cond_commands=" << cond_commands_.size()
<< " body_commands=" << body_commands_.size()
<< "; execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " pred: " << pred_ << " (" << pred.opaque() << ")";
return command_buffer->While(
execution_scope_id, se::DeviceMemory<bool>(pred),
CreateExecutionScopeBuilder(&cond_commands_, &execute_params,
&record_params),
CreateBuilder(&body_commands_, &execute_params, &record_params));
}
bool WhileCmd::force_update() {
return (cond_commands_.force_update() || body_commands_.force_update());
}
CommandBufferCmd::BufferUsageVector WhileCmd::buffers() {
absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers;
buffers.emplace(pred_, MemoryAccess::kWrite);
buffers.insert(cond_commands_.buffers().begin(),
cond_commands_.buffers().end());
buffers.insert(body_commands_.buffers().begin(),
body_commands_.buffers().end());
return {buffers.begin(), buffers.end()};
}
GemmCmd::GemmCmd(ExecutionStreamId execution_stream_id, GemmConfig config,
const BufferAllocation::Slice& lhs_buffer,
const BufferAllocation::Slice& rhs_buffer,
const BufferAllocation::Slice& output_buffer,
const BufferAllocation::Slice& workspace, bool deterministic)
: TracedCommandBufferCmd(CommandBufferCmdType::kGemmCmd,
execution_stream_id),
config_(std::move(config)),
lhs_buffer_(lhs_buffer),
rhs_buffer_(rhs_buffer),
output_buffer_(output_buffer),
workspace_(workspace),
deterministic_(deterministic) {}
absl::Status GemmCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
if (!params.stream->parent()->AsBlas()) {
return absl::InternalError("Failed to initialize BLAS support for GemmCmd");
}
return absl::OkStatus();
}
absl::Status GemmCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
se::DeviceMemoryBase lhs =
execute_params.buffer_allocations->GetDeviceAddress(lhs_buffer_);
se::DeviceMemoryBase rhs =
execute_params.buffer_allocations->GetDeviceAddress(rhs_buffer_);
se::DeviceMemoryBase out =
execute_params.buffer_allocations->GetDeviceAddress(output_buffer_);
se::DeviceMemoryBase workspace =
execute_params.buffer_allocations->GetDeviceAddress(workspace_);
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "GemmCmd: deterministic=" << deterministic_
<< "; execution_scope_id=" << execution_scope_id.value();
VLOG(5) << " Lhs: " << lhs_buffer_ << " (" << lhs.opaque() << ")";
VLOG(5) << " Lhs: " << rhs_buffer_ << " (" << rhs.opaque() << ")";
VLOG(5) << " Out: " << output_buffer_ << " (" << out.opaque() << ")";
VLOG(5) << " Workspace: " << workspace_ << " (" << workspace.opaque() << ")";
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return RunGemm(config_, lhs, rhs, out, workspace, deterministic_,
stream);
});
}
CommandBufferCmd::BufferUsageVector GemmCmd::buffers() {
return {{lhs_buffer_, MemoryAccess::kRead},
{rhs_buffer_, MemoryAccess::kRead},
{output_buffer_, MemoryAccess::kWrite},
{workspace_, MemoryAccess::kWrite}};
}
CublasLtCmd::CublasLtCmd(
ExecutionStreamId execution_stream_id, GemmConfig gemm_config,
se::gpu::BlasLt::Epilogue epilogue, int64_t algorithm_idx,
BufferAllocation::Slice a_buffer, BufferAllocation::Slice b_buffer,
BufferAllocation::Slice c_buffer, BufferAllocation::Slice d_buffer,
    BufferAllocation::Slice bias_buffer,
    BufferAllocation::Slice aux_buffer,
    BufferAllocation::Slice a_scale_buffer,
    BufferAllocation::Slice b_scale_buffer,
    BufferAllocation::Slice c_scale_buffer,
    BufferAllocation::Slice d_scale_buffer,
    BufferAllocation::Slice d_amax_buffer,
BufferAllocation::Slice workspace_buffer)
: TracedCommandBufferCmd(CommandBufferCmdType::kCublasLtCmd,
execution_stream_id),
gemm_config_(std::move(gemm_config)),
epilogue_(epilogue),
algorithm_idx_(algorithm_idx),
a_buffer_(a_buffer),
b_buffer_(b_buffer),
c_buffer_(c_buffer),
d_buffer_(d_buffer),
bias_buffer_(bias_buffer),
aux_buffer_(aux_buffer),
a_scale_buffer_(a_scale_buffer),
b_scale_buffer_(b_scale_buffer),
c_scale_buffer_(c_scale_buffer),
d_scale_buffer_(d_scale_buffer),
d_amax_buffer_(d_amax_buffer),
workspace_buffer_(workspace_buffer) {}
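// Matmul plans are cached per stream and algorithms per plan, so
// Initialize() can warm both caches and Record() only pays for lookups.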
absl::StatusOr<se::gpu::BlasLt::MatmulPlan*> CublasLtCmd::GetMatmulPlan(
const stream_executor::Stream* stream) {
auto it = matmul_plans_cache_.find(stream);
if (it != matmul_plans_cache_.end()) return it->second.get();
TF_ASSIGN_OR_RETURN(auto plan, se::gpu::BlasLt::GetMatmulPlan(
stream, gemm_config_, epilogue_));
auto [it_insert, _] = matmul_plans_cache_.emplace(stream, std::move(plan));
return it_insert->second.get();
}
absl::StatusOr<se::gpu::BlasLt::MatmulAlgorithm>
CublasLtCmd::GetMatmulAlgorithm(const se::gpu::BlasLt::MatmulPlan* plan,
int64_t max_workspace) {
auto it = matmul_algorithm_cache_.find(plan);
if (it != matmul_algorithm_cache_.end()) return it->second;
TF_ASSIGN_OR_RETURN(
auto algorithms,
      plan->GetAlgorithms(128, max_workspace));
TF_RET_CHECK(algorithm_idx_ >= 0 && algorithm_idx_ < algorithms.size());
auto [it_insert, _] =
matmul_algorithm_cache_.emplace(plan, algorithms[algorithm_idx_]);
return it_insert->second;
}
absl::Status CublasLtCmd::Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
if (!params.stream->parent()->AsBlas()) {
return absl::InternalError("Failed to initialize BLAS support for GemmCmd");
}
TF_ASSIGN_OR_RETURN(auto plan, GetMatmulPlan(params.stream));
TF_RETURN_IF_ERROR(
GetMatmulAlgorithm(plan, workspace_buffer_.size()).status());
return absl::OkStatus();
}
absl::Status CublasLtCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
TF_ASSIGN_OR_RETURN(auto plan, GetMatmulPlan(execute_params.stream));
TF_ASSIGN_OR_RETURN(auto algorithm,
GetMatmulAlgorithm(plan, workspace_buffer_.size()));
const BufferAllocations& allocs = *execute_params.buffer_allocations;
se::DeviceMemoryBase bias, a_scale, b_scale, c_scale, d_scale, aux, d_amax;
if (bias_buffer_.allocation() != nullptr) {
bias = allocs.GetDeviceAddress(bias_buffer_);
}
if (a_scale_buffer_.allocation() != nullptr) {
a_scale = allocs.GetDeviceAddress(a_scale_buffer_);
}
if (b_scale_buffer_.allocation() != nullptr) {
b_scale = allocs.GetDeviceAddress(b_scale_buffer_);
}
if (c_scale_buffer_.allocation() != nullptr) {
c_scale = allocs.GetDeviceAddress(c_scale_buffer_);
}
if (d_scale_buffer_.allocation() != nullptr) {
d_scale = allocs.GetDeviceAddress(d_scale_buffer_);
}
if (d_amax_buffer_.allocation() != nullptr) {
d_amax = allocs.GetDeviceAddress(d_amax_buffer_);
}
if (aux_buffer_.allocation() != nullptr) {
aux = allocs.GetDeviceAddress(aux_buffer_);
}
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "CublasLtCmd with execution_scope_id: "
<< execution_scope_id.value();
VLOG(5) << " a_buffer: " << a_buffer_.ToString();
VLOG(5) << " b_buffer: " << b_buffer_.ToString();
VLOG(5) << " c_buffer: " << c_buffer_.ToString();
VLOG(5) << " d_buffer: " << d_buffer_.ToString();
VLOG(5) << " bias_buffer: " << bias_buffer_.ToString();
VLOG(5) << " aux_buffer: " << aux_buffer_.ToString();
VLOG(5) << " a_scale_buffer: " << a_scale_buffer_.ToString();
VLOG(5) << " b_scale_buffer: " << b_scale_buffer_.ToString();
VLOG(5) << " c_scale_buffer: " << c_scale_buffer_.ToString();
VLOG(5) << " d_scale_buffer: " << d_scale_buffer_.ToString();
VLOG(5) << " d_amax_buffer: " << d_amax_buffer_.ToString();
VLOG(5) << " workspace_buffer: " << workspace_buffer_.ToString();
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return plan->ExecuteOnStream(
stream, allocs.GetDeviceAddress(a_buffer_),
allocs.GetDeviceAddress(b_buffer_),
allocs.GetDeviceAddress(c_buffer_),
allocs.GetDeviceAddress(d_buffer_), bias, aux, a_scale, b_scale,
c_scale, d_scale, d_amax, algorithm,
allocs.GetDeviceAddress(workspace_buffer_));
});
}
CommandBufferCmd::BufferUsageVector CublasLtCmd::buffers() {
BufferUsageVector buffer_usage;
buffer_usage.reserve(13);
buffer_usage.push_back({a_buffer_, MemoryAccess::kRead});
buffer_usage.push_back({b_buffer_, MemoryAccess::kRead});
buffer_usage.push_back({c_buffer_, MemoryAccess::kRead});
buffer_usage.push_back({d_buffer_, MemoryAccess::kWrite});
buffer_usage.push_back({workspace_buffer_, MemoryAccess::kWrite});
if (bias_buffer_.allocation() != nullptr) {
buffer_usage.push_back({bias_buffer_, MemoryAccess::kRead});
}
if (a_scale_buffer_.allocation() != nullptr) {
buffer_usage.push_back({a_scale_buffer_, MemoryAccess::kRead});
}
if (b_scale_buffer_.allocation() != nullptr) {
buffer_usage.push_back({b_scale_buffer_, MemoryAccess::kRead});
}
if (c_scale_buffer_.allocation() != nullptr) {
buffer_usage.push_back({c_scale_buffer_, MemoryAccess::kRead});
}
if (d_scale_buffer_.allocation() != nullptr) {
buffer_usage.push_back({d_scale_buffer_, MemoryAccess::kRead});
}
if (aux_buffer_.allocation() != nullptr) {
buffer_usage.push_back({aux_buffer_, MemoryAccess::kWrite});
}
if (d_amax_buffer_.allocation() != nullptr) {
buffer_usage.push_back({d_amax_buffer_, MemoryAccess::kRead});
}
return buffer_usage;
}
CuDnnCmd::CuDnnCmd(ExecutionStreamId execution_stream_id,
absl::Span<const BufferAllocation::Slice> args,
const std::shared_ptr<se::dnn::LazyDnnGraph> graph)
: TracedCommandBufferCmd(CommandBufferCmdType::kCuDnnCmd,
execution_stream_id),
args_(args.cbegin(), args.cend()),
graph_(graph) {}
absl::Status CuDnnCmd::Initialize(const Thunk::InitializeParams& params,
StateManager&) {
if (!params.stream->parent()->AsDnn()) {
return absl::InternalError("Failed to initialize DNN support for CuDnnCmd");
}
return absl::OkStatus();
}
absl::Status CuDnnCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
CHECK(graph_ != nullptr);
std::vector<se::DeviceMemoryBase> operands;
operands.reserve(args_.size());
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf =
execute_params.buffer_allocations->GetDeviceAddress(arg);
VLOG(5) << " Arg: " << arg << ": " << buf.opaque();
operands.push_back(buf);
}
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return graph_->get()->Execute(
*stream, absl::Span<se::DeviceMemoryBase>(operands),
execute_params.collective_params->local_device_ordinal);
});
}
CommandBufferCmd::BufferUsageVector CuDnnCmd::buffers() {
CommandBufferCmd::BufferUsageVector buffer_usage;
buffer_usage.reserve(args_.size());
for (int i = 0; i < args_.size() - 1; ++i) {
buffer_usage.push_back({args_[i], MemoryAccess::kRead});
}
buffer_usage.push_back({args_.back(), MemoryAccess::kWrite});
return buffer_usage;
}
absl::Status CustomCallCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
if (handler_ == nullptr) {
return RecordLegacyCustomCall(execute_params, record_params,
command_buffer);
}
return RecordXlaFfiCall(execute_params, record_params, command_buffer);
}
absl::Status CustomCallCmd::RecordLegacyCustomCall(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer) {
std::vector<void*> buffers;
buffers.reserve(operands_.size() + results_.size());
for (auto& slices : {operands_, results_}) {
for (const std::optional<Slice>& slice : slices) {
if (!slice.has_value()) {
buffers.push_back(nullptr);
continue;
}
if (!slice->slice.allocation()) {
return absl::InternalError(
"custom call input missing buffer allocation");
}
buffers.push_back(
execute_params.buffer_allocations->GetDeviceAddress(slice->slice)
.opaque());
}
}
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "CustomCallCmd: execution_scope_id=" << execution_scope_id.value();
for (int i = 0; i < operands_.size(); ++i) {
if (operands_[i].has_value()) {
VLOG(5) << " Operand " << i << ": " << operands_[i]->slice << " ("
<< buffers[i] << ")";
} else {
VLOG(5) << " Operand " << i << ": null";
}
}
for (int i = 0; i < results_.size(); ++i) {
if (results_[i].has_value()) {
VLOG(5) << " Result " << i << ": " << results_[i]->slice << " ("
<< buffers[operands_.size() + i] << ")";
} else {
VLOG(5) << " Result " << i << ": null";
}
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_ASSIGN_OR_RETURN(
auto nested_cmd,
se::TraceCommandBufferFactory::Create(
execute_params.stream->parent(),
execute_params.command_buffer_trace_stream, [&](se::Stream* stream) {
se::gpu::GpuStreamHandle gpu_stream =
se::gpu::AsGpuStreamValue(stream);
XlaCustomCallStatus custom_call_status;
call_target_(gpu_stream, buffers.data(), opaque_.data(),
opaque_.size(), &custom_call_status);
auto message = CustomCallStatusGetMessage(&custom_call_status);
if (message) {
return absl::InternalError(
absl::StrCat("CustomCall failed: ", *message));
}
return absl::OkStatus();
}));
return command_buffer->AddNestedCommandBuffer(execution_scope_id,
*nested_cmd);
#else
return Unavailable(
"Custom calls on GPU are not supported in this configuration. Please "
"build with --config=cuda or --config=rocm");
#endif
}
absl::Status CustomCallCmd::RecordXlaFfiCall(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer) {
ffi::CallFrameBuilder builder(operands_.size(), results_.size());
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "CustomCallCmd: execution_scope_id=" << execution_scope_id.value();
for (int i = 0; i < operands_.size(); ++i) {
const std::optional<Slice>& slice = operands_[i];
if (!slice.has_value()) {
return Internal("FFI handlers do not support tokens (yet)!");
}
if (!slice->slice.allocation())
return Internal("custom call input missing buffer allocation");
se::DeviceMemoryBase buffer =
execute_params.buffer_allocations->GetDeviceAddress(slice->slice);
VLOG(5) << " Operand " << i << ": " << slice->slice << " ("
<< buffer.opaque() << ")";
builder.AddBufferArg(buffer, slice->shape.element_type(),
slice->shape.dimensions());
}
for (int i = 0; i < results_.size(); ++i) {
const std::optional<Slice>& slice = results_[i];
if (!slice.has_value()) {
return Internal("FFI handlers do not support tokens (yet)!");
}
if (!slice->slice.allocation())
return Internal("custom call input missing buffer allocation");
se::DeviceMemoryBase buffer =
execute_params.buffer_allocations->GetDeviceAddress(slice->slice);
VLOG(5) << " Result " << i << ": " << slice->slice << " ("
<< buffer.opaque() << ")";
builder.AddBufferArg(buffer, slice->shape.element_type(),
slice->shape.dimensions());
}
ffi::CallFrameBuilder::AttributesBuilder attrs;
attrs.Append(attributes_);
builder.AddAttributes(attrs.Build());
ffi::CallFrame call_frame = builder.Build();
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_ASSIGN_OR_RETURN(
auto nested_cmd,
se::TraceCommandBufferFactory::Create(
execute_params.stream->parent(),
execute_params.command_buffer_trace_stream, [&](se::Stream* stream) {
ffi::CallOptions options = {
execute_params.buffer_allocations->device_ordinal(),
ffi::CallOptions::GpuOptions{
execute_params.stream,
execute_params.buffer_allocations->memory_allocator()},
nullptr,
execute_params.ffi_execution_context};
return ffi::Call(handler_, call_frame, options);
}));
return command_buffer->AddNestedCommandBuffer(execution_scope_id,
*nested_cmd);
#else
return Unavailable(
"Custom calls on GPU are not supported in this configuration. Please "
"build with --config=cuda or --config=rocm");
#endif
}
CommandBufferCmd::BufferUsageVector CustomCallCmd::buffers() {
CommandBufferCmd::BufferUsageVector buffer_usage;
for (auto& slices : {operands_, results_}) {
for (const std::optional<Slice>& slice : slices) {
if (!slice.has_value()) continue;
buffer_usage.push_back({slice->slice, MemoryAccess::kWrite});
}
}
return buffer_usage;
}
BarrierCmd::BarrierCmd(ExecutionStreamId execution_stream_id,
ExecutionStreamId from_stream_id)
: CommandBufferCmd(CommandBufferCmdType::kBarrierCmd, execution_stream_id),
from_stream_id_(from_stream_id) {}
absl::Status BarrierCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
VLOG(5) << "BarrierCmd from stream " << from_stream_id_.value()
<< " to stream " << execution_stream_id().value();
if (from_stream_id_ != execution_stream_id()) {
TF_RETURN_IF_ERROR(command_buffer->Barrier(
CommandBufferCmd::GetExecutionScope(record_params, from_stream_id_),
CommandBufferCmd::GetExecutionScope(record_params,
execution_stream_id())));
}
return absl::OkStatus();
}
BarrierCmd::BufferUsageVector BarrierCmd::buffers() { return {}; }
CollectiveCmd::CollectiveCmd(CommandBufferCmdType cmd_type,
ExecutionStreamId execution_stream_id,
ExecutionStreamId async_from_stream_id,
NcclApi* nccl_api, NcclCollectiveConfig config)
: CommandBufferCmd(cmd_type, execution_stream_id),
async_from_stream_id_(async_from_stream_id),
nccl_api_(nccl_api),
config_(std::move(config)) {}
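// For async collectives, inserts a barrier from the execution scope of the
// stream the async operation was issued from into this command's execution
// scope, ordering the collective after its producers.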
absl::Status CollectiveCmd::BarrierIfAsync(
se::CommandBuffer* command_buffer, se::StreamExecutor* executor,
const CommandBufferCmd::RecordParams& record_params) {
if (IsAsync()) {
TF_RETURN_IF_ERROR(
command_buffer->Barrier(CommandBufferCmd::GetExecutionScope(
record_params, async_from_stream_id_),
CommandBufferCmd::GetExecutionScope(
record_params, execution_stream_id())));
VLOG(5) << "Insert Async barrier from stream "
<< async_from_stream_id_.value() << " to stream "
<< execution_stream_id().value();
}
return absl::OkStatus();
}
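// Declares the NCCL clique this collective participates in as a resource
// request, so the required communicators can be acquired before the command
// buffer executes.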
absl::Status CollectiveCmd::Prepare(
const Thunk::PrepareParams& params,
Thunk::ResourceRequests& resource_requests) {
TF_ASSIGN_OR_RETURN(
NcclCliqueKey clique_key,
GetNcclCliqueKey(*params.collective_params, config().replica_groups,
config().group_mode, nccl_stream_id(),
GetAsyncStreamKind()));
TF_ASSIGN_OR_RETURN(
size_t num_local_participants,
GetNumLocalParticipants(*params.collective_params,
config().replica_groups, config().group_mode));
return resource_requests.AddClique(clique_key, num_local_participants);
}
absl::Status CollectiveCmd::AddTracedCommandBuffer(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer,
absl::FunctionRef<absl::Status(se::Stream*)> trace) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::CommandBuffer> nested_cmd,
se::TraceCommandBufferFactory::Create(
execute_params.stream->parent(),
execute_params.command_buffer_trace_stream, trace));
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
return command_buffer->AddNestedCommandBuffer(execution_scope_id,
*nested_cmd);
}
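// The collective commands below follow a common pattern: barrier if the
// operation is async, resolve device buffers, look up the NCCL communicator
// for the clique, then trace the NCCL call into a nested command buffer.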
AllReduceCmd::AllReduceCmd(
ExecutionStreamId execution_stream_id,
ExecutionStreamId async_from_stream_id, NcclApi* nccl_api,
NcclCollectiveConfig config, ReductionKind reduction_kind,
absl::Span<const NcclCollectiveThunk::Buffer> buffers)
: CollectiveCmd(CommandBufferCmdType::kAllReduceCmd, execution_stream_id,
async_from_stream_id, nccl_api, std::move(config)),
reduction_kind_(reduction_kind),
buffers_(buffers.begin(), buffers.end()) {}
absl::Status AllReduceCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
TF_RETURN_IF_ERROR(BarrierIfAsync(
command_buffer, execute_params.stream->parent(), record_params));
TF_ASSIGN_OR_RETURN(
std::vector<DeviceBufferPair> device_buffers,
ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_,
config().operand_element_type));
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "AllReduceCmd: reduction=" << ReductionKindString(reduction_kind_)
<< "; execution_scope_id=" << execution_scope_id.value();
for (size_t i = 0; i < device_buffers.size(); ++i) {
VLOG(5) << " Src: " << buffers_[i].source_buffer << " ("
<< device_buffers[i].source_buffer.opaque() << ")";
VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " ("
<< device_buffers[i].destination_buffer.opaque() << ")";
}
if (!execute_params.collective_params || !execute_params.collective_cliques) {
return absl::InvalidArgumentError(
"AllReduceCmd requires collective parameters and cliques");
}
TF_ASSIGN_OR_RETURN(
NcclCommHandleWrapper comm_handle,
GetNcclComm(*execute_params.collective_params,
*execute_params.collective_cliques, config().replica_groups,
config().group_mode, nccl_stream_id(), GetAsyncStreamKind()));
NcclApi::NcclCommHandle comm = comm_handle.comm_handle;
NcclApi::ScopedPersistentPlanAllocator scoped_allocator(
comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>(
execute_params.buffer_allocations->device_ordinal(),
execute_params.buffer_allocations->memory_allocator(),
execute_params.stream));
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return RunAllReduce(nccl_api(), reduction_kind_, device_buffers,
*stream, comm);
});
}
CommandBufferCmd::BufferUsageVector AllReduceCmd::buffers() {
BufferUsageVector buffer_usage;
for (auto& buffer : buffers_) {
buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead);
buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite);
}
return buffer_usage;
}
ReduceScatterCmd::ReduceScatterCmd(
ExecutionStreamId execution_stream_id,
ExecutionStreamId async_from_stream_id, NcclApi* nccl_api,
NcclCollectiveConfig config, ReductionKind reduction_kind,
absl::Span<const NcclCollectiveThunk::Buffer> buffers)
: CollectiveCmd(CommandBufferCmdType::kReduceScatter, execution_stream_id,
async_from_stream_id, nccl_api, std::move(config)),
reduction_kind_(reduction_kind),
buffers_(buffers.begin(), buffers.end()) {}
absl::Status ReduceScatterCmd::Record(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer) {
TF_RETURN_IF_ERROR(BarrierIfAsync(
command_buffer, execute_params.stream->parent(), record_params));
TF_ASSIGN_OR_RETURN(
std::vector<DeviceBufferPair> device_buffers,
ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_,
config().operand_element_type));
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "ReduceScatterCmd: reduction="
<< ReductionKindString(reduction_kind_)
<< "; execution_scope_id=" << execution_scope_id.value();
for (size_t i = 0; i < device_buffers.size(); ++i) {
VLOG(5) << " Src: " << buffers_[i].source_buffer << " ("
<< device_buffers[i].source_buffer.opaque() << ")";
VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " ("
<< device_buffers[i].destination_buffer.opaque() << ")";
}
if (!execute_params.collective_params || !execute_params.collective_cliques) {
return absl::InvalidArgumentError(
"ReduceScatterCmd requires collective parameters and cliques");
}
TF_ASSIGN_OR_RETURN(
NcclCommHandleWrapper comm_handle,
GetNcclComm(*execute_params.collective_params,
*execute_params.collective_cliques, config().replica_groups,
config().group_mode, nccl_stream_id(), GetAsyncStreamKind()));
NcclApi::NcclCommHandle comm = comm_handle.comm_handle;
NcclApi::ScopedPersistentPlanAllocator scoped_allocator(
comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>(
execute_params.buffer_allocations->device_ordinal(),
execute_params.buffer_allocations->memory_allocator(),
execute_params.stream));
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return RunReduceScatter(nccl_api(), reduction_kind_, device_buffers,
*stream, comm);
});
}
CommandBufferCmd::BufferUsageVector ReduceScatterCmd::buffers() {
BufferUsageVector buffer_usage;
for (auto& buffer : buffers_) {
buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead);
buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite);
}
return buffer_usage;
}
AllToAllCmd::AllToAllCmd(ExecutionStreamId execution_stream_id,
ExecutionStreamId async_from_stream_id,
NcclApi* nccl_api, NcclCollectiveConfig config,
bool has_split_dimension,
absl::Span<const NcclCollectiveThunk::Buffer> buffers)
: CollectiveCmd(CommandBufferCmdType::kAllToAll, execution_stream_id,
async_from_stream_id, nccl_api, std::move(config)),
has_split_dimension_(has_split_dimension),
buffers_(buffers.begin(), buffers.end()) {}
absl::Status AllToAllCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
TF_RETURN_IF_ERROR(BarrierIfAsync(
command_buffer, execute_params.stream->parent(), record_params));
TF_ASSIGN_OR_RETURN(
std::vector<DeviceBufferPair> device_buffers,
ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_,
config().operand_element_type));
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "AllToAllCmd, has_split_dimension=" << has_split_dimension_
<< ", execution_scope_id=" << execution_scope_id.value();
for (size_t i = 0; i < device_buffers.size(); ++i) {
VLOG(5) << " Src: " << buffers_[i].source_buffer << " ("
<< device_buffers[i].source_buffer.opaque() << ")";
VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " ("
<< device_buffers[i].destination_buffer.opaque() << ")";
}
if (!execute_params.collective_params || !execute_params.collective_cliques) {
return absl::InvalidArgumentError(
"ReduceScatterCmd requires collective parameters and cliques");
}
TF_ASSIGN_OR_RETURN(
NcclCommHandleWrapper comm_handle,
GetNcclComm(*execute_params.collective_params,
*execute_params.collective_cliques, config().replica_groups,
config().group_mode, nccl_stream_id(), GetAsyncStreamKind()));
NcclApi::NcclCommHandle comm = comm_handle.comm_handle;
NcclApi::ScopedPersistentPlanAllocator scoped_allocator(
comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>(
execute_params.buffer_allocations->device_ordinal(),
execute_params.buffer_allocations->memory_allocator(),
execute_params.stream));
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return RunAllToAll(nccl_api(), has_split_dimension_, device_buffers,
*stream, comm);
});
}
CommandBufferCmd::BufferUsageVector AllToAllCmd::buffers() {
BufferUsageVector buffer_usage;
for (auto& buffer : buffers_) {
buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead);
buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite);
}
return buffer_usage;
}
AllGatherCmd::AllGatherCmd(
ExecutionStreamId execution_stream_id,
ExecutionStreamId async_from_stream_id, NcclApi* nccl_api,
NcclCollectiveConfig config,
absl::Span<const NcclCollectiveThunk::Buffer> buffers)
: CollectiveCmd(CommandBufferCmdType::kAllGatherCmd, execution_stream_id,
async_from_stream_id, nccl_api, std::move(config)),
buffers_(buffers.begin(), buffers.end()) {}
absl::Status AllGatherCmd::Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) {
TF_RETURN_IF_ERROR(BarrierIfAsync(
command_buffer, execute_params.stream->parent(), record_params));
TF_ASSIGN_OR_RETURN(
std::vector<DeviceBufferPair> device_buffers,
ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_,
config().operand_element_type));
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "AllGatherCmd: execution_scope_id=" << execution_scope_id.value();
for (size_t i = 0; i < device_buffers.size(); ++i) {
VLOG(5) << " Src: " << buffers_[i].source_buffer << " ("
<< device_buffers[i].source_buffer.opaque() << ")";
VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " ("
<< device_buffers[i].destination_buffer.opaque() << ")";
}
if (!execute_params.collective_params || !execute_params.collective_cliques) {
return absl::InvalidArgumentError(
"AllGatherCmd requires collective parameters and cliques");
}
TF_ASSIGN_OR_RETURN(
NcclCommHandleWrapper comm_handle,
GetNcclComm(*execute_params.collective_params,
*execute_params.collective_cliques, config().replica_groups,
config().group_mode, nccl_stream_id(), GetAsyncStreamKind()));
NcclApi::NcclCommHandle comm = comm_handle.comm_handle;
NcclApi::ScopedPersistentPlanAllocator scoped_allocator(
comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>(
execute_params.buffer_allocations->device_ordinal(),
execute_params.buffer_allocations->memory_allocator(),
execute_params.stream));
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return RunAllGather(nccl_api(), device_buffers, *stream, comm);
});
}
CommandBufferCmd::BufferUsageVector AllGatherCmd::buffers() {
BufferUsageVector buffer_usage;
for (auto& buffer : buffers_) {
buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead);
buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite);
}
return buffer_usage;
}
CollectiveBroadcastCmd::CollectiveBroadcastCmd(
ExecutionStreamId execution_stream_id,
ExecutionStreamId async_from_stream_id, NcclApi* nccl_api,
NcclCollectiveConfig config,
absl::Span<const NcclCollectiveThunk::Buffer> buffers)
: CollectiveCmd(CommandBufferCmdType::kCollectiveBroadcastCmd,
execution_stream_id, async_from_stream_id, nccl_api,
std::move(config)),
buffers_(buffers.begin(), buffers.end()) {}
absl::Status CollectiveBroadcastCmd::Record(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer) {
TF_RETURN_IF_ERROR(BarrierIfAsync(
command_buffer, execute_params.stream->parent(), record_params));
TF_ASSIGN_OR_RETURN(
std::vector<DeviceBufferPair> device_buffers,
ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_,
config().operand_element_type));
ExecutionScopeId execution_scope_id = GetExecutionScope(record_params);
VLOG(5) << "CollectiveBroadcastCmd: execution_scope_id="
<< execution_scope_id.value();
for (size_t i = 0; i < device_buffers.size(); ++i) {
VLOG(5) << " Src: " << buffers_[i].source_buffer << " ("
<< device_buffers[i].source_buffer.opaque() << ")";
VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " ("
<< device_buffers[i].destination_buffer.opaque() << ")";
}
if (!execute_params.collective_params || !execute_params.collective_cliques) {
return absl::InvalidArgumentError(
"CollectiveBroadcastCmd requires collective parameters and cliques");
}
TF_ASSIGN_OR_RETURN(
NcclCommHandleWrapper comm_handle,
GetNcclComm(*execute_params.collective_params,
*execute_params.collective_cliques, config().replica_groups,
config().group_mode, nccl_stream_id(), GetAsyncStreamKind()));
NcclApi::NcclCommHandle comm = comm_handle.comm_handle;
NcclApi::ScopedPersistentPlanAllocator scoped_allocator(
comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>(
execute_params.buffer_allocations->device_ordinal(),
execute_params.buffer_allocations->memory_allocator(),
execute_params.stream));
return AddTracedCommandBuffer(
execute_params, record_params, command_buffer, [&](se::Stream* stream) {
return RunCollectiveBroadcast(device_buffers, *stream, comm,
nccl_api());
});
}
CommandBufferCmd::BufferUsageVector CollectiveBroadcastCmd::buffers() {
BufferUsageVector buffer_usage;
for (auto& buffer : buffers_) {
buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead);
buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite);
}
return buffer_usage;
}
} | #include "xla/service/gpu/runtime/command_buffer_cmd.h"
#include <array>
#include <cstdint>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/platform_util.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_test_kernels_fatbin.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla::gpu {
using BufferUsage = CommandBufferCmd::BufferUsage;
using BufferUsageVector = CommandBufferCmd::BufferUsageVector;
using MemoryAccess = CommandBufferCmd::MemoryAccess;
static se::StreamExecutor* GpuExecutor() {
auto name =
absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
static constexpr auto s0 = ExecutionStreamId(0);
static constexpr auto s1 = ExecutionStreamId(1);
struct TestOnlyCommandBufferCmd : public CommandBufferCmd {
TestOnlyCommandBufferCmd(ExecutionStreamId execution_stream_id,
BufferUsageVector buffer_usage)
: CommandBufferCmd(CommandBufferCmdType::kUnknownCmd,
execution_stream_id),
buffer_usage(buffer_usage) {}
absl::Status Record(const Thunk::ExecuteParams&, const RecordParams&,
se::CommandBuffer*) override {
return absl::OkStatus();
}
BufferUsageVector buffers() override { return buffer_usage; }
BufferUsageVector buffer_usage;
};
class FakeCmd : public CommandBufferCmd {
public:
FakeCmd(ExecutionStreamId execution_stream_id)
: CommandBufferCmd(CommandBufferCmdType::kTracedCommandBufferCmd,
execution_stream_id) {}
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override {
return absl::OkStatus();
}
BufferUsageVector buffers() override { return BufferUsageVector{}; }
};
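// The tests below exercise CommandBufferCmdSequence's automatic barrier
// insertion: a barrier is expected only when a later command touches memory
// that conflicts with an earlier command on the same execution stream, or
// when kSerialize mode forces one after every command.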
TEST(CommandBufferCmdTest, SerializeExecution) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice1, MemoryAccess::kRead);
CommandBufferCmdSequence commands(
CommandBufferCmdSequence::SynchronizationMode::kSerialize);
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), true);
}
TEST(CommandBufferCmdTest, NoReadBarrier) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice1, MemoryAccess::kRead);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
}
TEST(CommandBufferCmdTest, NoWriteBarrier) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 200, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kWrite);
auto use1 = BufferUsage(slice1, MemoryAccess::kWrite);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
}
TEST(CommandBufferCmdTest, WriteConflictBarrier) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice0, MemoryAccess::kRead);
auto use2 = BufferUsage(slice1, MemoryAccess::kWrite);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use2});
ASSERT_EQ(commands.barriers().size(), 3);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
EXPECT_EQ(commands.barriers().at(2), true);
}
TEST(CommandBufferCmdTest, NoWriteConflictsAcrossStreams) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice1, MemoryAccess::kWrite);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s1, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
}
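// Records a single device-to-device memcpy into a primary command buffer and
// verifies that b receives a's contents (42s) after submission.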
TEST(CommandBufferCmdTest, MemcpyCmd) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
CommandBufferCmdSequence commands;
commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length);
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b}, 0, &allocator);
CommandBufferCmd::StateManager state;
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
CommandBufferCmd::RecordParams record_params = {state};
auto command_buffer =
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value();
TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get()));
TF_ASSERT_OK(command_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42));
}
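// Chains memcpys that alternate between execution streams; each BarrierCmd
// (the first stream waits on the second, judging by the usage here) orders
// the next copy after the prior write, so 42 propagates a -> b -> c -> d -> e.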
TEST(CommandBufferCmdTest, BarrierCmd) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> e = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
TF_ASSERT_OK(stream->MemZero(&e, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation alloc_c(2, byte_length, 0);
BufferAllocation alloc_d(3, byte_length, 0);
BufferAllocation alloc_e(4, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
BufferAllocation::Slice slice_c(&alloc_c, 0, byte_length);
BufferAllocation::Slice slice_d(&alloc_d, 0, byte_length);
BufferAllocation::Slice slice_e(&alloc_e, 0, byte_length);
CommandBufferCmdSequence commands;
commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length);
commands.Emplace<BarrierCmd>(s1, s0);
commands.Emplace<MemcpyDeviceToDeviceCmd>(s1, slice_c, slice_b, byte_length);
commands.Emplace<BarrierCmd>(s0, s1);
commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_d, slice_c, byte_length);
commands.Emplace<BarrierCmd>(s1, s0);
commands.Emplace<MemcpyDeviceToDeviceCmd>(s1, slice_e, slice_d, byte_length);
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b, c, d, e}, 0, &allocator);
CommandBufferCmd::StateManager state;
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
CommandBufferCmd::RecordParams record_params = {state};
auto command_buffer =
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value();
TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get()));
TF_ASSERT_OK(command_buffer->Submit(stream.get()));
std::vector<int32_t> dst_b(4, 0);
std::vector<int32_t> dst_c(4, 0);
std::vector<int32_t> dst_d(4, 0);
std::vector<int32_t> dst_e(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst_b.data(), b, byte_length));
TF_ASSERT_OK(stream->Memcpy(dst_c.data(), c, byte_length));
TF_ASSERT_OK(stream->Memcpy(dst_d.data(), d, byte_length));
TF_ASSERT_OK(stream->Memcpy(dst_e.data(), e, byte_length));
ASSERT_EQ(dst_b, std::vector<int32_t>(4, 42));
ASSERT_EQ(dst_c, std::vector<int32_t>(4, 42));
ASSERT_EQ(dst_d, std::vector<int32_t>(4, 42));
ASSERT_EQ(dst_e, std::vector<int32_t>(4, 42));
}
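// Launches the AddI32 kernel from the embedded GPU test fatbin with
// b = a + a, so every element of b should come back as 84.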
TEST(CommandBufferCmdTest, LaunchCmd) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
auto args = {slice_a, slice_a, slice_b};
auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead,
MemoryAccess::kWrite};
CommandBufferCmdSequence commands;
  commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access,
                              LaunchDimensions(1, 4), 0);
TF_ASSERT_OK_AND_ASSIGN(std::vector<uint8_t> fatbin,
se::gpu::GetGpuTestKernelsFatbin());
  Thunk::ExecutableSource source = {{}, fatbin};
CommandBufferCmd::StateManager state;
TF_ASSERT_OK(commands.Initialize({executor, source}, state));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
CommandBufferCmd::RecordParams record_params = {state};
auto command_buffer =
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value();
TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get()));
TF_ASSERT_OK(command_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
}
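// StateManager keys per-command state by the CommandBufferCmd pointer; the
// fabricated 0x1234567 pointer is only used as a map key, never dereferenced.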
TEST(CommandBufferCmdStateManageTest, GetOrCreateState) {
struct TestState : public CommandBufferCmd::State {
int32_t value = 0;
};
CommandBufferCmd* cmd = reinterpret_cast<CommandBufferCmd*>(0x1234567);
CommandBufferCmd::StateManager state_manager;
auto* state0 = state_manager.GetOrNull<TestState>(cmd);
ASSERT_EQ(state0, nullptr);
auto* state1 = state_manager.GetOrCreate<TestState>(cmd);
ASSERT_EQ(state1->value, 0);
state1->value += 42;
auto* state2 = state_manager.GetOrCreate<TestState>(cmd);
ASSERT_EQ(state2->value, 42);
ASSERT_EQ(state1, state2);
}
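// TracedCommandBuffer caches traced command buffers keyed by the device
// addresses of the buffer allocations; tracing reruns only when the address
// combination is absent from the size-limited (apparently LRU) cache.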
TEST(TracedCommandBuffer, GetOrUpdateCommandBuffer) {
auto run_traced_test = [](int trace_cache_size) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
auto traced_cmd = FakeCmd(ExecutionStreamId(0));
BufferAllocation alloc0(0, 1024, 0);
BufferAllocation alloc1(1, 1024, 0);
CommandBufferCmd::BufferUsageVector buffers = {
{BufferAllocation::Slice(&alloc0, 0, 1024), MemoryAccess::kRead},
{BufferAllocation::Slice(&alloc1, 0, 1024), MemoryAccess::kWrite}};
TracedCommandBuffer traced_cmd_buffer(&traced_cmd, buffers,
trace_cache_size);
se::DeviceMemoryBase mem0(reinterpret_cast<void*>(0x01234567));
se::DeviceMemoryBase mem1(reinterpret_cast<void*>(0x12345670));
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({mem0, mem1}, 0, &allocator);
int64_t num_calls = 0;
auto trace = [&](se::Stream*) {
num_calls++;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer0,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer1,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_EQ(command_buffer0, command_buffer1);
EXPECT_EQ(num_calls, 1);
se::DeviceMemoryBase mem2(reinterpret_cast<void*>(0x23456701));
allocations = BufferAllocations({mem0, mem2}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer2,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_NE(command_buffer0, command_buffer2);
EXPECT_EQ(num_calls, 2);
allocations = BufferAllocations({mem0, mem1}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer3,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_EQ(command_buffer0, command_buffer3);
EXPECT_EQ(num_calls, 2);
allocations = BufferAllocations({mem0, mem0}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer4,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_NE(command_buffer4, command_buffer3);
ASSERT_NE(command_buffer4, command_buffer2);
EXPECT_EQ(num_calls, 3);
allocations = BufferAllocations({mem0, mem1}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer5,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_EQ(command_buffer0, command_buffer5);
EXPECT_EQ(num_calls, 3);
};
run_traced_test(2);
run_traced_test(3);
}
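// Measures lookup cost by cycling through four allocation patterns; after
// warm-up every call should be a cache hit, assuming the default cache
// capacity is at least four.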
static void BM_GetOrTraceCommandBuffer(benchmark::State& state) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
BufferAllocation alloc0(0, 1024, 0);
BufferAllocation alloc1(1, 1024, 0);
CommandBufferCmd::BufferUsageVector buffers = {
{BufferAllocation::Slice(&alloc0, 0, 1024), MemoryAccess::kRead},
{BufferAllocation::Slice(&alloc1, 0, 1024), MemoryAccess::kWrite}};
se::DeviceMemoryBase mem0(reinterpret_cast<void*>(0x01234567));
se::DeviceMemoryBase mem1(reinterpret_cast<void*>(0x12345670));
se::StreamExecutorMemoryAllocator allocator(executor);
std::array<BufferAllocations, 4> allocations = {
BufferAllocations({mem0, mem1}, 0, &allocator),
BufferAllocations({mem1, mem0}, 0, &allocator),
BufferAllocations({mem0, mem0}, 0, &allocator),
BufferAllocations({mem1, mem1}, 0, &allocator),
};
int32_t index = 0;
auto traced_cmd = FakeCmd(ExecutionStreamId(0));
TracedCommandBuffer traced_cmd_buffer(&traced_cmd, buffers);
auto trace = [](se::Stream*) { return absl::OkStatus(); };
absl::FunctionRef<absl::Status(se::Stream*)> trace_ref(trace);
for (auto s : state) {
TF_CHECK_OK(traced_cmd_buffer
.GetOrTraceCommandBuffer(&allocations[index++ % 4],
executor, stream.get(), trace_ref)
.status());
}
}
BENCHMARK(BM_GetOrTraceCommandBuffer);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/command_buffer_cmd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/command_buffer_cmd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a665630-8cc7-44cb-a331-2ac303dc883c | cpp | tensorflow/tensorflow | descriptor_pool_registry | tensorflow/core/util/proto/descriptor_pool_registry.cc | tensorflow/core/util/proto/descriptor_pool_registry_test.cc | #include <string>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/proto/descriptor_pool_registry.h"
namespace tensorflow {
DescriptorPoolRegistry* DescriptorPoolRegistry::Global() {
static DescriptorPoolRegistry* registry = new DescriptorPoolRegistry;
return registry;
}
DescriptorPoolRegistry::DescriptorPoolFn* DescriptorPoolRegistry::Get(
const string& source) {
auto found = fns_.find(source);
if (found == fns_.end()) return nullptr;
return &found->second;
}
void DescriptorPoolRegistry::Register(
const string& source,
const DescriptorPoolRegistry::DescriptorPoolFn& pool_fn) {
auto existing = Get(source);
CHECK_EQ(existing, nullptr)
<< "descriptor pool for source: " << source << " already registered";
fns_.insert(std::pair<const string&, DescriptorPoolFn>(source, pool_fn));
}
} | #include "tensorflow/core/util/proto/descriptor_pool_registry.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
struct Value {
static Status Function(
tensorflow::protobuf::DescriptorPool const** desc_pool,
std::unique_ptr<tensorflow::protobuf::DescriptorPool>* owned_desc_pool) {
return absl::OkStatus();
}
};
REGISTER_DESCRIPTOR_POOL("TEST POOL 1", Value::Function);
REGISTER_DESCRIPTOR_POOL("TEST POOL 2", Value::Function);
}
TEST(DescriptorPoolRegistryTest, TestBasic) {
EXPECT_EQ(DescriptorPoolRegistry::Global()->Get("NON-EXISTENT"), nullptr);
auto pool1 = DescriptorPoolRegistry::Global()->Get("TEST POOL 1");
EXPECT_NE(pool1, nullptr);
auto pool2 = DescriptorPoolRegistry::Global()->Get("TEST POOL 2");
EXPECT_NE(pool2, nullptr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/proto/descriptor_pool_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/proto/descriptor_pool_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5657ffe1-80dd-465c-9d29-73c706d6baa7 | cpp | tensorflow/tensorflow | delegate_loader | tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.cc | tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader_test.cc | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <cerrno>
#include <string>
#include "absl/strings/numbers.h"
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace delegates {
namespace utils {
namespace {
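// Exports the directory containing the delegate shared library via
// kTfLiteLibraryPathEnvironmentVariable, presumably so the delegate can
// locate companion files installed next to it.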
void setLibraryPathEnvironmentVariable(const std::string& delegate_path) {
std::string directory_path = "";
size_t last_slash_index = delegate_path.rfind('/');
if (last_slash_index != std::string::npos) {
directory_path = delegate_path.substr(0, last_slash_index);
}
if (setenv(kTfLiteLibraryPathEnvironmentVariable, directory_path.c_str(),
1) != 0) {
TFLITE_LOG(WARN) << "Error setting environment variable "
<< kTfLiteLibraryPathEnvironmentVariable
<< " with error: " << strerror(errno);
}
}
}
using ::tflite::acceleration::AndroidInfo;
using ::tflite::acceleration::RequestAndroidInfo;
const TfLiteStableDelegate* LoadDelegateFromSharedLibrary(
const std::string& delegate_path) {
void* symbol_pointer =
LoadSymbolFromSharedLibrary(delegate_path, kTfLiteStableDelegateSymbol);
if (!symbol_pointer) {
return nullptr;
}
return reinterpret_cast<const TfLiteStableDelegate*>(symbol_pointer);
}
void* LoadSymbolFromSharedLibrary(const std::string& delegate_path,
const std::string& delegate_symbol) {
void* delegate_lib_handle = nullptr;
int dlopen_flags = RTLD_NOW | RTLD_LOCAL;
int sdk_version;
AndroidInfo android_info;
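  // RTLD_NODELETE keeps the library mapped after dlclose(); it is applied
  // only on Android API level 23+, where it is assumed to work reliably.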
if (RequestAndroidInfo(&android_info).ok() &&
absl::SimpleAtoi(android_info.android_sdk_version, &sdk_version) &&
sdk_version >= 23) {
dlopen_flags |= RTLD_NODELETE;
TFLITE_LOG(INFO) << "Android SDK level is " << sdk_version
<< ", using dlopen with RTLD_NODELETE.";
}
setLibraryPathEnvironmentVariable(delegate_path);
delegate_lib_handle = dlopen(delegate_path.c_str(), dlopen_flags);
if (!delegate_lib_handle) {
TFLITE_LOG(ERROR) << "Failed to open library " << delegate_path << ": "
<< dlerror();
return nullptr;
}
void* symbol_pointer = dlsym(delegate_lib_handle, delegate_symbol.c_str());
if (!symbol_pointer) {
TFLITE_LOG(ERROR) << "Failed to find " << delegate_symbol
<< " symbol: " << dlerror();
dlclose(delegate_lib_handle);
return nullptr;
}
return symbol_pointer;
}
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include <cstdlib>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
namespace {
using tflite::TFLiteSettings;
using tflite::TFLiteSettingsBuilder;
using tflite::delegates::utils::LoadDelegateFromSharedLibrary;
using tflite::delegates::utils::LoadSymbolFromSharedLibrary;
TEST(TfLiteDelegateLoaderUtilsTest, Simple) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/"
"libtensorflowlite_sample_stable_delegate.so"
);
ASSERT_NE(stable_delegate_handle, nullptr);
EXPECT_STREQ(stable_delegate_handle->delegate_abi_version,
TFL_STABLE_DELEGATE_ABI_VERSION);
EXPECT_STREQ(stable_delegate_handle->delegate_name,
tflite::example::kSampleStableDelegateName);
EXPECT_STREQ(stable_delegate_handle->delegate_version,
tflite::example::kSampleStableDelegateVersion);
EXPECT_NE(stable_delegate_handle->delegate_plugin, nullptr);
EXPECT_STREQ(
getenv(tflite::delegates::utils::kTfLiteLibraryPathEnvironmentVariable),
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate");
flatbuffers::FlatBufferBuilder flatbuffer_builder;
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder.Finish(tflite_settings);
const TFLiteSettings* settings = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder.GetBufferPointer());
auto delegate = stable_delegate_handle->delegate_plugin->create(settings);
ASSERT_NE(delegate, nullptr);
EXPECT_EQ(
stable_delegate_handle->delegate_plugin->get_delegate_errno(delegate), 0);
stable_delegate_handle->delegate_plugin->destroy(delegate);
}
TEST(TfLiteDelegateLoaderUtilsTest, WrongSymbolReturnsNullptr) {
void* symbol_pointer = LoadSymbolFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/libtensorflowlite_sample_stable_delegate.so",
"NOT_REAL_SYMBOL");
EXPECT_EQ(symbol_pointer, nullptr);
}
TEST(TfLiteDelegateLoaderUtilsTest, MissingLibReturnsNullptr) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary("not_real_delegate.so");
EXPECT_EQ(stable_delegate_handle, nullptr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4ee02b11-1f7f-48c0-be99-6a067fa5e82a | cpp | google/arolla | tuple_qtype | arolla/qtype/tuple_qtype.cc | arolla/qtype/tuple_qtype_test.cc | #include "arolla/qtype/tuple_qtype.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/string.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
class Tuple {};
class TupleQType final : public QType {
public:
static std::unique_ptr<TupleQType> Make(
absl::Span<const QTypePtr> field_qtypes) {
FrameLayout::Builder layout_builder;
std::vector<TypedSlot> fields;
fields.reserve(field_qtypes.size());
for (auto field_qtype : field_qtypes) {
fields.push_back(AddSlot(field_qtype, &layout_builder));
}
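    // Register a zero-sized typeid(Tuple) tag at offset 0 so the layout is
    // identifiable as a tuple, unless a nested tuple field already sits at
    // offset 0 and provides the tag (registering it twice would fail).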
bool needTupleTag = true;
for (const auto& field : fields) {
if (field.byte_offset() == 0 &&
field.GetType()->type_layout().HasField(0, typeid(Tuple))) {
needTupleTag = false;
break;
}
}
if (needTupleTag) {
auto status = layout_builder.RegisterUnsafeSlot(0, 0, typeid(Tuple));
if (!status.ok()) {
LOG(FATAL) << status;
}
}
return std::make_unique<TupleQType>(
field_qtypes, std::move(layout_builder).Build(), std::move(fields));
}
TupleQType(absl::Span<const QTypePtr> field_qtypes, FrameLayout&& layout,
std::vector<TypedSlot>&& fields)
: QType(ConstructorArgs{
.name = absl::StrCat("tuple<", JoinTypeNames(field_qtypes), ">"),
.type_info = typeid(Tuple),
.type_layout = std::move(layout),
.type_fields = std::move(fields),
.qtype_specialization_key = "::arolla::TupleQType",
}),
field_qtypes_(field_qtypes.begin(), field_qtypes.end()) {}
absl::Span<const QTypePtr> field_qtypes() const { return field_qtypes_; }
void UnsafeCopy(const void* source, void* destination) const override {
ConstFramePtr source_frame(source, &type_layout());
FramePtr destination_frame(destination, &type_layout());
for (const auto& field : type_fields()) {
field.CopyTo(source_frame, field, destination_frame);
}
}
void UnsafeCombineToFingerprintHasher(
const void* source, FingerprintHasher* hasher) const override {
hasher->Combine(type_fields().size());
for (const auto& field : type_fields()) {
field.GetType()->UnsafeCombineToFingerprintHasher(
static_cast<const char*>(source) + field.byte_offset(), hasher);
}
}
ReprToken UnsafeReprToken(const void* source) const override {
ConstFramePtr frame_ptr(source, &type_layout());
std::ostringstream result;
result << "(";
bool first = true;
for (const auto& field : type_fields()) {
result << NonFirstComma(first)
<< TypedRef::FromSlot(field, frame_ptr).Repr();
}
result << ")";
return ReprToken{std::move(result).str()};
}
private:
std::vector<QTypePtr> field_qtypes_;
};
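// Interns TupleQTypes so that structurally identical tuples share a single
// instance. The map key is a Span that points into the stored TupleQType's
// own field_qtypes_, which keeps the key stable for the registry's lifetime.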
class TupleQTypeRegistry {
public:
static TupleQTypeRegistry* instance() {
static absl::NoDestructor<TupleQTypeRegistry> result;
return result.get();
}
QTypePtr GetQType(absl::Span<const QTypePtr> field_qtypes)
ABSL_LOCKS_EXCLUDED(lock_) {
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find(field_qtypes); it != registry_.end()) {
return it->second.get();
}
}
auto tuple_qtype = TupleQType::Make(field_qtypes);
absl::MutexLock guard(&lock_);
return registry_
.try_emplace(tuple_qtype->field_qtypes(), std::move(tuple_qtype))
.first->second.get();
}
private:
absl::Mutex lock_;
absl::flat_hash_map<absl::Span<const QTypePtr>, std::unique_ptr<TupleQType>>
registry_ ABSL_GUARDED_BY(lock_);
};
template <typename T>  // T is TypedRef or TypedValue (see MakeTuple below).
TypedValue MakeTupleImpl(absl::Span<const T> fields) {
std::vector<QTypePtr> field_types;
field_types.reserve(fields.size());
for (const auto& field : fields) {
field_types.push_back(field.GetType());
}
auto status_or_result =
TypedValue::FromFields(MakeTupleQType(field_types), fields);
DCHECK_OK(status_or_result.status());
return status_or_result.value_or(TypedValue::FromValue(Unit{}));
}
template <typename T>  // T is TypedRef or TypedValue (see MakeNamedTuple below).
absl::StatusOr<TypedValue> MakeNamedTupleImpl(
absl::Span<const std::string> field_names, absl::Span<const T> fields) {
std::vector<QTypePtr> field_qtypes;
field_qtypes.reserve(fields.size());
for (const auto& field : fields) {
field_qtypes.push_back(field.GetType());
}
ASSIGN_OR_RETURN(
auto named_tuple_qtype,
MakeNamedTupleQType(field_names, MakeTupleQType(field_qtypes)));
absl::StatusOr<TypedValue> result =
TypedValue::FromFields(named_tuple_qtype, fields);
DCHECK_OK(result.status());
return std::move(result).value_or(TypedValue::FromValue(Unit{}));
}
std::string NamedTupleQTypeName(absl::Span<const std::string> field_names,
QTypePtr tuple_qtype) {
constexpr size_t kMaxFieldNames = 5;
std::ostringstream o;
o << "namedtuple<";
size_t fields_to_report = std::min(field_names.size(), kMaxFieldNames);
for (size_t i = 0; i != fields_to_report; ++i) {
if (i != 0) {
o << ",";
}
o << field_names[i] << "="
<< tuple_qtype->type_fields()[i].GetType()->name();
}
if (fields_to_report < field_names.size()) {
o << ", [" << field_names.size() - fields_to_report << " fields]";
}
o << ">";
return o.str();
}
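// A named tuple is a derived qtype over a regular tuple that adds
// field-name-to-index lookup through NamedFieldQTypeInterface.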
class NamedTupleQType final : public BasicDerivedQType,
public NamedFieldQTypeInterface {
public:
NamedTupleQType(absl::Span<const std::string> field_names,
QTypePtr tuple_qtype)
: BasicDerivedQType(ConstructorArgs{
.name = NamedTupleQTypeName(field_names, tuple_qtype),
.base_qtype = tuple_qtype,
.qtype_specialization_key = "::arolla::NamedTupleQType",
}),
field_names_(field_names.begin(), field_names.end()) {
name2index_.reserve(field_names.size());
int64_t id = 0;
for (const std::string& name : field_names_) {
name2index_.emplace(name, id++);
}
}
absl::Span<const std::string> GetFieldNames() const final {
return field_names_;
}
std::optional<int64_t> GetFieldIndexByName(
absl::string_view field_name) const final {
if (auto it = name2index_.find(field_name); it != name2index_.end()) {
return it->second;
}
return std::nullopt;
}
private:
absl::flat_hash_map<absl::string_view, int64_t> name2index_;
std::vector<std::string> field_names_;
};
class NamedTupleQTypeRegistry {
public:
static NamedTupleQTypeRegistry* instance() {
static absl::NoDestructor<NamedTupleQTypeRegistry> result;
return result.get();
}
QTypePtr GetQType(absl::Span<const std::string> field_names,
QTypePtr tuple_qtype) ABSL_LOCKS_EXCLUDED(lock_) {
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find({field_names, tuple_qtype});
it != registry_.end()) {
return it->second.get();
}
}
auto named_tuple_qtype =
std::make_unique<NamedTupleQType>(field_names, tuple_qtype);
absl::MutexLock guard(&lock_);
return registry_
.try_emplace({named_tuple_qtype->GetFieldNames(), tuple_qtype},
std::move(named_tuple_qtype))
.first->second.get();
}
private:
using RegistryKey = std::pair<absl::Span<const std::string>, QTypePtr>;
absl::Mutex lock_;
absl::flat_hash_map<RegistryKey, std::unique_ptr<NamedTupleQType>> registry_
ABSL_GUARDED_BY(lock_);
};
}
bool IsTupleQType(const QType* qtype) {
return fast_dynamic_downcast_final<const TupleQType*>(qtype) != nullptr;
}
QTypePtr MakeTupleQType(absl::Span<const QTypePtr> field_qtypes) {
return TupleQTypeRegistry::instance()->GetQType(field_qtypes);
}
TypedValue MakeTuple(absl::Span<const TypedRef> fields) {
return MakeTupleImpl(fields);
}
TypedValue MakeTuple(absl::Span<const TypedValue> fields) {
return MakeTupleImpl(fields);
}
absl::StatusOr<TypedValue> MakeNamedTuple(
absl::Span<const std::string> field_names,
absl::Span<const TypedRef> fields) {
return MakeNamedTupleImpl(field_names, fields);
}
absl::StatusOr<TypedValue> MakeNamedTuple(
absl::Span<const std::string> field_names,
absl::Span<const TypedValue> fields) {
return MakeNamedTupleImpl(field_names, fields);
}
bool IsNamedTupleQType(const QType* qtype) {
return fast_dynamic_downcast_final<const NamedTupleQType*>(qtype) != nullptr;
}
absl::StatusOr<QTypePtr> MakeNamedTupleQType(
absl::Span<const std::string> field_names, QTypePtr tuple_qtype) {
if (!IsTupleQType(tuple_qtype)) {
return absl::InvalidArgumentError(absl::StrFormat(
"incorrect NamedTupleQType: expected tuple, found %s",
tuple_qtype != nullptr ? tuple_qtype->name() : std::string("nullptr")));
}
if (field_names.size() != tuple_qtype->type_fields().size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"incorrect NamedTupleQType #field_names != #fields: %d vs %d",
field_names.size(), tuple_qtype->type_fields().size()));
}
absl::flat_hash_set<absl::string_view> name_set;
for (const std::string& name : field_names) {
if (!name_set.insert(name).second) {
return absl::InvalidArgumentError(absl::StrFormat(
"incorrect NamedTupleQType: field name %s is duplicated", name));
}
}
return NamedTupleQTypeRegistry::instance()->GetQType(field_names,
tuple_qtype);
}
} | #include "arolla/qtype/tuple_qtype.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla::testing {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::ReprTokenEq;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::MatchesRegex;
TEST(TupleQType, Empty) {
auto qtype = MakeTupleQType({});
EXPECT_TRUE(IsTupleQType(qtype));
EXPECT_EQ(qtype->name(), "tuple<>");
EXPECT_EQ(qtype->type_layout().AllocSize(), 0);
EXPECT_EQ(qtype->type_layout().AllocAlignment().value, 1);
auto value = MakeTupleFromFields();
EXPECT_EQ(value.GetType(), qtype);
EXPECT_EQ(value.GetFieldCount(), 0);
EXPECT_THAT(value.GenReprToken(), ReprTokenEq("()"));
}
TEST(TupleQType, EmptyRegression) {
auto qtype_0 = MakeTupleQType({});
auto qtype_1 = MakeTupleQType({qtype_0, qtype_0});
EXPECT_TRUE(IsTupleQType(qtype_1));
EXPECT_EQ(qtype_1->name(), "tuple<tuple<>,tuple<>>");
EXPECT_EQ(qtype_1->type_layout().AllocSize(), 0);
EXPECT_EQ(qtype_1->type_layout().AllocAlignment().value, 1);
auto value_0 = MakeTupleFromFields();
auto value_1 = MakeTupleFromFields(value_0, value_0);
EXPECT_EQ(value_1.GetType(), qtype_1);
auto copy_1 = TypedValue(value_1.AsRef());
EXPECT_EQ(value_1.GetFingerprint(), copy_1.GetFingerprint());
EXPECT_THAT(value_1.GenReprToken(), ReprTokenEq("((), ())"));
}
TEST(TupleQType, Trivial) {
auto qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
EXPECT_TRUE(IsTupleQType(qtype));
EXPECT_EQ(qtype->name(), "tuple<INT32,FLOAT64,BYTES>");
auto value = MakeTupleFromFields(int32_t{34}, double{17}, Bytes("Hello"));
EXPECT_EQ(value.GetType(), qtype);
EXPECT_EQ(value.GetFieldCount(), 3);
EXPECT_THAT(value.GetField(0).As<int32_t>(), IsOkAndHolds(int32_t{34}));
EXPECT_THAT(value.GetField(1).As<double>(), IsOkAndHolds(double{17.}));
ASSERT_OK_AND_ASSIGN(Bytes bytes, value.GetField(2).As<Bytes>());
EXPECT_THAT(bytes, Eq(Bytes("Hello")));
EXPECT_THAT(value.GenReprToken(), ReprTokenEq("(34, float64{17}, b'Hello')"));
}
TEST(TupleQType, CopyTo) {
auto qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
EXPECT_TRUE(IsTupleQType(qtype));
EXPECT_EQ(qtype->name(), "tuple<INT32,FLOAT64,BYTES>");
auto value = MakeTupleFromFields(int32_t{34}, double{17}, Bytes("Hello"));
EXPECT_THAT(value.GetField(0).As<int32_t>(), IsOkAndHolds(int32_t{34}));
EXPECT_THAT(value.GetField(1).As<double>(), IsOkAndHolds(double{17.}));
auto copy = TypedValue(value.AsRef());
EXPECT_EQ(value.GetFingerprint(), copy.GetFingerprint());
EXPECT_THAT(copy.GenReprToken(), ReprTokenEq("(34, float64{17}, b'Hello')"));
}
TEST(TupleQType, QValueFromFields) {
auto qtype = MakeTupleQType({GetQType<int>(), GetQType<float>()});
{
ASSERT_OK_AND_ASSIGN(auto qvalue, TypedValue::FromFields(
qtype, {TypedRef::FromValue(2),
TypedRef::FromValue(3.14f)}));
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
ASSERT_OK_AND_ASSIGN(
auto qvalue,
TypedValue::FromFields(
qtype, {TypedValue::FromValue(2), TypedValue::FromValue(3.14f)}));
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
EXPECT_THAT(TypedValue::FromFields(qtype, {TypedValue::FromValue(2)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected 2 values, got 1; "
"compound_qtype=tuple<INT32,FLOAT32>")));
}
{
EXPECT_THAT(TypedValue::FromFields(qtype, {TypedValue::FromValue(2),
TypedValue::FromValue(3)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected fields[1]: FLOAT32, got INT32; "
"compound_qtype=tuple<INT32,FLOAT32>")));
}
}
TEST(NamedTupleQType, Empty) {
auto tuple_qtype = MakeTupleQType({});
ASSERT_OK_AND_ASSIGN(auto qtype, MakeNamedTupleQType({}, tuple_qtype));
EXPECT_TRUE(IsNamedTupleQType(qtype));
EXPECT_THAT(GetFieldNames(qtype), IsEmpty());
}
TEST(NamedTupleQType, Trivial) {
auto tuple_qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
ASSERT_OK_AND_ASSIGN(auto qtype,
MakeNamedTupleQType({"a", "b", "c"}, tuple_qtype));
EXPECT_TRUE(IsNamedTupleQType(qtype));
EXPECT_EQ(qtype->name(), "namedtuple<a=INT32,b=FLOAT64,c=BYTES>");
EXPECT_EQ(GetFieldIndexByName(nullptr, "a"), std::nullopt);
EXPECT_EQ(GetFieldIndexByName(qtype, "a"), 0);
EXPECT_EQ(GetFieldIndexByName(qtype, "b"), 1);
EXPECT_EQ(GetFieldIndexByName(qtype, "c"), 2);
EXPECT_EQ(GetFieldIndexByName(qtype, "d"), std::nullopt);
EXPECT_THAT(GetFieldNames(qtype), ElementsAre("a", "b", "c"));
EXPECT_EQ(GetFieldQTypeByName(nullptr, "a"), nullptr);
EXPECT_EQ(GetFieldQTypeByName(qtype, "a"), GetQType<int32_t>());
EXPECT_EQ(GetFieldQTypeByName(qtype, "b"), GetQType<double>());
EXPECT_EQ(GetFieldQTypeByName(qtype, "c"), GetQType<Bytes>());
EXPECT_EQ(GetFieldQTypeByName(qtype, "d"), nullptr);
auto derived_qtype_interface =
dynamic_cast<const DerivedQTypeInterface*>(qtype);
ASSERT_NE(derived_qtype_interface, nullptr);
EXPECT_EQ(derived_qtype_interface->GetBaseQType(), tuple_qtype);
{
ASSERT_OK_AND_ASSIGN(auto qtype2,
MakeNamedTupleQType({"a", "b", "c"}, tuple_qtype));
EXPECT_EQ(qtype, qtype2);
EXPECT_THAT(GetFieldNames(qtype2), ElementsAre("a", "b", "c"));
}
{
ASSERT_OK_AND_ASSIGN(auto qtype2,
MakeNamedTupleQType({"c", "b", "a"}, tuple_qtype));
EXPECT_EQ(qtype2->name(), "namedtuple<c=INT32,b=FLOAT64,a=BYTES>");
EXPECT_EQ(GetFieldIndexByName(qtype2, "c"), 0);
EXPECT_NE(qtype, qtype2);
EXPECT_THAT(GetFieldNames(qtype2), ElementsAre("c", "b", "a"));
}
{
auto tuple_qtype2 = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<int32_t>()});
ASSERT_OK_AND_ASSIGN(auto qtype2,
MakeNamedTupleQType({"a", "b", "c"}, tuple_qtype2));
EXPECT_EQ(qtype2->name(), "namedtuple<a=INT32,b=FLOAT64,c=INT32>");
EXPECT_NE(qtype, qtype2);
EXPECT_THAT(GetFieldNames(qtype2), ElementsAre("a", "b", "c"));
}
}
TEST(NamedTupleQType, QValueFromFields) {
auto tuple_qtype = MakeTupleQType({GetQType<int>(), GetQType<float>()});
ASSERT_OK_AND_ASSIGN(auto qtype,
MakeNamedTupleQType({"a", "b"}, tuple_qtype));
{
ASSERT_OK_AND_ASSIGN(auto qvalue, TypedValue::FromFields(
qtype, {TypedRef::FromValue(2),
TypedRef::FromValue(3.14f)}));
EXPECT_TRUE(IsNamedTupleQType(qvalue.GetType()));
EXPECT_EQ(qvalue.GetType(), qtype);
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
ASSERT_OK_AND_ASSIGN(
auto qvalue,
TypedValue::FromFields(
qtype, {TypedValue::FromValue(2), TypedValue::FromValue(3.14f)}));
EXPECT_TRUE(IsNamedTupleQType(qvalue.GetType()));
EXPECT_EQ(qvalue.GetType(), qtype);
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
EXPECT_THAT(
TypedValue::FromFields(qtype, {TypedValue::FromValue(2)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected 2 values, got 1; "
"compound_qtype=namedtuple<a=INT32,b=FLOAT32>")));
}
{
EXPECT_THAT(
TypedValue::FromFields(qtype, {TypedValue::FromValue(2),
TypedValue::FromValue(3)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected fields[1]: FLOAT32, got INT32; "
"compound_qtype=namedtuple<a=INT32,b=FLOAT32>")));
}
}
TEST(NamedTupleQType, BigTuple) {
constexpr size_t kFieldCount = 100;
QTypePtr field_qtype = GetQType<int32_t>();
auto tuple_qtype =
MakeTupleQType(std::vector<QTypePtr>{kFieldCount, field_qtype});
std::vector<std::string> names;
for (size_t i = 0; i != kFieldCount; ++i) {
names.push_back(std::to_string(i));
}
ASSERT_OK_AND_ASSIGN(auto qtype, MakeNamedTupleQType(names, tuple_qtype));
EXPECT_TRUE(IsNamedTupleQType(qtype));
EXPECT_THAT(GetFieldNames(qtype), ElementsAreArray(names));
EXPECT_EQ(qtype->name(),
"namedtuple<0=INT32,1=INT32,2=INT32,3=INT32,4=INT32, [95 fields]>");
}
TEST(NamedTupleQType, Errors) {
EXPECT_THAT(
MakeNamedTupleQType({"a", "b"}, nullptr).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*tuple.*found.*nullptr.*")));
EXPECT_THAT(
MakeNamedTupleQType({"a", "b"}, GetQType<int32_t>()).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*tuple.*found.*INT32.*")));
auto tuple_qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
EXPECT_THAT(MakeNamedTupleQType({"a", "b"}, tuple_qtype).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*2 vs 3.*")));
EXPECT_THAT(MakeNamedTupleQType({"a", "b", "a"}, tuple_qtype).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*a.*duplicate.*")));
EXPECT_THAT(GetFieldNames(nullptr), IsEmpty());
EXPECT_THAT(GetFieldNames(GetQType<int32_t>()), IsEmpty());
}
TEST(NamedTupleQType, GetFieldByNameAs) {
ASSERT_OK_AND_ASSIGN(auto named_tuple, MakeNamedTuple(
{"a", "b"}, {TypedRef::FromValue(2.0f), TypedRef::FromValue(3)}));
EXPECT_THAT(GetFieldByNameAs<float>(named_tuple.AsRef(), "a"),
IsOkAndHolds(2.0f));
EXPECT_THAT(GetFieldByNameAs<float>(named_tuple.AsRef(), "c").status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*no field named \"c\".*")));
EXPECT_THAT(
GetFieldByNameAs<Bytes>(named_tuple.AsRef(), "a").status(),
StatusIs(
absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `float` (FLOAT32), got "
"`arolla::Bytes`; while accessing field \"a\"")));
}
TEST(NamedTupleQType, MakeNamedTuple) {
ASSERT_OK_AND_ASSIGN(auto named_tuple,
MakeNamedTuple({"a", "b"}, {TypedRef::FromValue(2.0f),
TypedRef::FromValue(3)}));
ASSERT_OK_AND_ASSIGN(
auto named_tuple_qtype,
MakeNamedTupleQType(
{"a", "b"}, MakeTupleQType({GetQType<float>(), GetQType<int>()})));
EXPECT_EQ(named_tuple.GetType(), named_tuple_qtype);
EXPECT_THAT(named_tuple.GenReprToken(),
ReprTokenEq("namedtuple<a=FLOAT32,b=INT32>{(2., 3)}"));
EXPECT_EQ(named_tuple.GetFieldCount(), 2);
}
TEST(NamedTupleQType, MakeEmptyNamedTuple) {
ASSERT_OK_AND_ASSIGN(auto named_tuple,
MakeNamedTuple({}, absl::Span<const TypedRef>{}));
ASSERT_OK_AND_ASSIGN(auto named_tuple_qtype,
MakeNamedTupleQType({}, MakeTupleQType({})));
EXPECT_EQ(named_tuple.GetType(), named_tuple_qtype);
EXPECT_THAT(named_tuple.GenReprToken(), ReprTokenEq("namedtuple<>{()}"));
EXPECT_EQ(named_tuple.GetFieldCount(), 0);
}
TEST(NamedTupleQtype, MakeNamedTuple_SameFromTypedValueAndTypedRef) {
ASSERT_OK_AND_ASSIGN(TypedValue named_tuple_from_values,
MakeNamedTuple({"a", "b"}, {TypedValue::FromValue(2.0f),
TypedValue::FromValue(3)}));
ASSERT_OK_AND_ASSIGN(auto named_tuple_from_refs,
MakeNamedTuple({"a", "b"}, {TypedRef::FromValue(2.0f),
TypedRef::FromValue(3)}));
EXPECT_EQ(named_tuple_from_values.GetFingerprint(),
named_tuple_from_refs.GetFingerprint());
}
TEST(NamedTupleQType, MakeNamedTuple_Error) {
EXPECT_THAT(
MakeNamedTuple({"a"},
{TypedValue::FromValue(2.0f), TypedValue::FromValue(3)}),
StatusIs(
absl::StatusCode::kInvalidArgument,
MatchesRegex(
"incorrect NamedTupleQType #field_names != #fields: 1 vs 2")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/tuple_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/tuple_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
37dcc1c8-b16f-4fd8-a653-6211c63fd914 | cpp | tensorflow/tensorflow | auto_shard_dataset_op | tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.cc | tensorflow/core/kernels/data/experimental/auto_shard_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const AutoShardDatasetOp::kAutoShardPolicy;
constexpr const char* const AutoShardDatasetOp::kDatasetType;
constexpr const char* const AutoShardDatasetOp::kInputDataset;
constexpr const char* const AutoShardDatasetOp::kNumWorkers;
constexpr const char* const AutoShardDatasetOp::kNumReplicas;
constexpr const char* const AutoShardDatasetOp::kIndex;
constexpr const char* const AutoShardDatasetOp::kOutputTypes;
constexpr const char* const AutoShardDatasetOp::kOutputShapes;
constexpr char kOptimizerName[] = "tf_auto_shard";
AutoShardDatasetOp::AutoShardDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), auto_shard_policy_(0) {
if (ctx->HasAttr(kAutoShardPolicy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kAutoShardPolicy, &auto_shard_policy_));
}
if (ctx->HasAttr(kNumReplicas)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kNumReplicas, &num_replicas_));
}
}
void AutoShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t index, num_workers, auto_shard_policy, num_replicas;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kNumWorkers, &num_workers));
OP_REQUIRES(
ctx, num_workers > 0,
errors::InvalidArgument("num_workers must be greater than zero."));
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kIndex, &index));
OP_REQUIRES(
ctx, index >= 0 && index < num_workers,
errors::InvalidArgument("index must be between 0 and ", num_workers - 1));
auto_shard_policy = auto_shard_policy_;
if (input->options().distribute_options().auto_shard_policy() !=
AutoShardPolicy::AUTO) {
auto_shard_policy =
input->options().distribute_options().auto_shard_policy();
}
num_replicas = num_replicas_;
auto config_factory = [num_workers, index, auto_shard_policy,
num_replicas]() {
return CreateConfig(num_workers, index, auto_shard_policy, num_replicas);
};
core::RefCountPtr<DatasetBase> rewritten;
OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory),
false, &rewritten));
*output = rewritten.release();
}
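// Builds a one-shot Grappler config that runs only the tf_auto_shard
// optimizer, forwarding the sharding parameters through its parameter map.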
RewriterConfig AutoShardDatasetOp::CreateConfig(int64_t num_workers,
int64_t index,
int64_t auto_shard_policy,
int64_t num_replicas) {
RewriterConfig rewriter_config;
rewriter_config.set_fail_on_optimizer_errors(true);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.add_optimizers(kOptimizerName);
auto custom_optimizer = rewriter_config.add_custom_optimizers();
custom_optimizer->set_name(kOptimizerName);
const std::array<std::pair<const char* const, int64_t>, 4> attr_pairs = {
{{kNumWorkers, num_workers},
{kIndex, index},
{kAutoShardPolicy, auto_shard_policy},
{kNumReplicas, num_replicas}}};
for (const auto& pair : attr_pairs) {
AttrValue attr;
attr.set_i(pair.second);
(*custom_optimizer->mutable_parameter_map())[pair.first] = attr;
}
return rewriter_config;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AutoShardDataset").Device(DEVICE_CPU),
AutoShardDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalAutoShardDataset").Device(DEVICE_CPU),
AutoShardDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include <string>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "auto_shard_dataset";
class AutoShardDatasetParams : public DatasetParams {
public:
template <typename T>
AutoShardDatasetParams(T input_dataset_params, int64_t num_workers,
int64_t index, int auto_shard_policy,
int64_t num_replicas, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_workers_(num_workers),
num_replicas_(num_replicas),
index_(index),
auto_shard_policy_(auto_shard_policy) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return CreateTensors<int64_t>(TensorShape({}), {{num_workers_}, {index_}});
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(AutoShardDatasetOp::kInputDataset);
input_names->emplace_back(AutoShardDatasetOp::kNumWorkers);
input_names->emplace_back(AutoShardDatasetOp::kIndex);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(AutoShardDatasetOp::kAutoShardPolicy,
auto_shard_policy_);
attr_vector->emplace_back(AutoShardDatasetOp::kNumReplicas, num_replicas_);
attr_vector->emplace_back(AutoShardDatasetOp::kOutputTypes, output_dtypes_);
attr_vector->emplace_back(AutoShardDatasetOp::kOutputShapes,
output_shapes_);
return absl::OkStatus();
}
string dataset_type() const override {
return AutoShardDatasetOp::kDatasetType;
}
private:
int64_t num_workers_;
int64_t num_replicas_;
int64_t index_;
int auto_shard_policy_;
};
class AutoShardDatasetOpTest : public DatasetOpsTestBase {};
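// The positional arguments after the input params are, in order: num_workers,
// index, auto_shard_policy, num_replicas (see the constructor above).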
AutoShardDatasetParams AutoShardDatasetParams1() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
2,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams2() {
return AutoShardDatasetParams(RangeDatasetParams(0, 1, 1),
5,
2,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams3() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
4,
3,
0,
4,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams4() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
7,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams5() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
-3,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams6() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
-3,
1,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams7() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
0,
1,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<AutoShardDatasetParams>> GetNextTestCases() {
return {
{AutoShardDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
      {AutoShardDatasetParams2(),
       {}},  // Range [0, 1) sharded across 5 workers at index 2 is empty.
{AutoShardDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})}};
}
ITERATOR_GET_NEXT_TEST_P(AutoShardDatasetOpTest, AutoShardDatasetParams,
GetNextTestCases())
TEST_F(AutoShardDatasetOpTest, InvalidArguments) {
std::vector<AutoShardDatasetParams> invalid_dataset_params = {
AutoShardDatasetParams4(), AutoShardDatasetParams5(),
AutoShardDatasetParams6(), AutoShardDatasetParams7()};
for (const auto& dataset_params : invalid_dataset_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
REGISTER_OP("AutoShardDatasetOpTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"));
static void add_identity_nodes(Node* node, Graph& graph,
std::vector<Node*>& identity_nodes) {
for (int i = 0; i < node->num_outputs(); i++) {
Node* new_node;
std::string name = absl::StrCat("Identity", i);
TF_EXPECT_OK(NodeBuilder(name, "Identity")
.Attr("T", node->output_type(i))
.Input(node, i)
.Finalize(&graph, &new_node));
identity_nodes.push_back(new_node);
}
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST_F(AutoShardDatasetOpTest, AutoShardDatasetTypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* num_workers;
Node* index;
Node* auto_shard_dataset;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_workers", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &num_workers));
TF_EXPECT_OK(NodeBuilder("index", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &index));
TF_EXPECT_OK(NodeBuilder("AutoShardDataset", "AutoShardDataset")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(num_workers)
.Input(index)
.Finalize(&graph, &auto_shard_dataset));
std::vector<Node*> identity_nodes;
add_identity_nodes(auto_shard_dataset, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
TEST_F(AutoShardDatasetOpTest, RebatchDatasetTypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* num_replicas;
Node* rebatch_dataset;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_replicas", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &num_replicas));
TF_EXPECT_OK(NodeBuilder("RebatchDataset", "RebatchDataset")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(num_replicas)
.Finalize(&graph, &rebatch_dataset));
std::vector<Node*> identity_nodes;
add_identity_nodes(rebatch_dataset, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
TEST_F(AutoShardDatasetOpTest, RebatchDatasetV2TypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* batch_sizes;
Node* drop_remainder;
Node* rebatch_dataset_v2;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_replicas", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &batch_sizes));
TF_EXPECT_OK(
NodeBuilder("drop_remainder", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_BOOL)
.Finalize(&graph, &drop_remainder));
TF_EXPECT_OK(NodeBuilder("RebatchDatasetV2", "RebatchDatasetV2")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(batch_sizes)
.Input(drop_remainder)
.Finalize(&graph, &rebatch_dataset_v2));
std::vector<Node*> identity_nodes;
add_identity_nodes(rebatch_dataset_v2, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/auto_shard_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dee11806-ee1a-48a5-8587-2f45b172a9ae | cpp | tensorflow/tensorflow | fuse_convolutions | tensorflow/tools/graph_transforms/fuse_convolutions.cc | tensorflow/tools/graph_transforms/fuse_convolutions_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status FuseResizePadAndConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
GraphDef replaced_graph_def;
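  // Matches Conv2D(MirrorPad(ResizeBilinear(input, size), paddings), weights)
  // and collapses the chain into a single FusedResizeAndPadConv2D node.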
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"Conv2D",
{
{"MirrorPad",
{
{"ResizeBilinear"},
{"*"}
}
},
{"*"}
}
},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& conv_node = match.node;
const NodeDef& mirror_pad_node = match.inputs[0].node;
const NodeDef& weights_node = match.inputs[1].node;
const NodeDef& resize_node = match.inputs[0].inputs[0].node;
const NodeDef& pad_dims_node = match.inputs[0].inputs[1].node;
new_nodes->push_back(weights_node);
new_nodes->push_back(pad_dims_node);
NodeDef fused_conv;
fused_conv.set_op("FusedResizeAndPadConv2D");
fused_conv.set_name(match.node.name());
AddNodeInput(resize_node.input(0), &fused_conv);
AddNodeInput(resize_node.input(1), &fused_conv);
AddNodeInput(mirror_pad_node.input(1), &fused_conv);
AddNodeInput(conv_node.input(1), &fused_conv);
CopyNodeAttr(resize_node, "align_corners", "resize_align_corners",
&fused_conv);
CopyNodeAttr(mirror_pad_node, "mode", "mode", &fused_conv);
CopyNodeAttr(conv_node, "T", "T", &fused_conv);
CopyNodeAttr(conv_node, "padding", "padding", &fused_conv);
CopyNodeAttr(conv_node, "strides", "strides", &fused_conv);
new_nodes->push_back(fused_conv);
return OkStatus();
},
{}, &replaced_graph_def));
*output_graph_def = replaced_graph_def;
return OkStatus();
}
Status FuseResizeAndConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"Conv2D",
{
{"ResizeBilinear"},
{"*"}
}
},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& conv_node = match.node;
const NodeDef& resize_node = match.inputs[0].node;
const NodeDef& weights_node = match.inputs[1].node;
new_nodes->push_back(weights_node);
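        // FusedResizeAndPadConv2D still expects a paddings input, so an
        // all-zero 4x2 paddings constant is synthesized here since this
        // pattern has no MirrorPad.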
NodeDef pad_dims_node;
pad_dims_node.set_op("Const");
pad_dims_node.set_name(conv_node.name() + "_dummy_paddings");
SetNodeAttr("dtype", DT_INT32, &pad_dims_node);
SetNodeTensorAttr<int32>("value", {4, 2}, {0, 0, 0, 0, 0, 0, 0, 0},
&pad_dims_node);
new_nodes->push_back(pad_dims_node);
NodeDef fused_conv;
fused_conv.set_op("FusedResizeAndPadConv2D");
fused_conv.set_name(match.node.name());
AddNodeInput(resize_node.input(0), &fused_conv);
AddNodeInput(resize_node.input(1), &fused_conv);
AddNodeInput(pad_dims_node.name(), &fused_conv);
AddNodeInput(conv_node.input(1), &fused_conv);
CopyNodeAttr(resize_node, "align_corners", "resize_align_corners",
&fused_conv);
SetNodeAttr("mode", "REFLECT", &fused_conv);
CopyNodeAttr(conv_node, "T", "T", &fused_conv);
CopyNodeAttr(conv_node, "padding", "padding", &fused_conv);
CopyNodeAttr(conv_node, "strides", "strides", &fused_conv);
new_nodes->push_back(fused_conv);
return OkStatus();
},
{}, &replaced_graph_def));
*output_graph_def = replaced_graph_def;
return OkStatus();
}
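// Fuses a MirrorPad immediately followed by a Conv2D into a FusedPadConv2D
// node, dropping the standalone pad op.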
Status FusePadAndConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"Conv2D",
{
{"MirrorPad",
{
{"*"},
{"*"},
}
},
{"*"}
}
},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& conv_node = match.node;
CHECK_EQ("Conv2D", conv_node.op());
const NodeDef& mirror_pad_node = match.inputs[0].node;
CHECK_EQ("MirrorPad", mirror_pad_node.op());
const NodeDef& weights_node = match.inputs[1].node;
const NodeDef& input_node = match.inputs[0].inputs[0].node;
const NodeDef& pad_dims_node = match.inputs[0].inputs[1].node;
new_nodes->push_back(weights_node);
new_nodes->push_back(input_node);
new_nodes->push_back(pad_dims_node);
NodeDef fused_conv;
fused_conv.set_op("FusedPadConv2D");
fused_conv.set_name(match.node.name());
AddNodeInput(mirror_pad_node.input(0), &fused_conv);
AddNodeInput(mirror_pad_node.input(1), &fused_conv);
AddNodeInput(conv_node.input(1), &fused_conv);
CopyNodeAttr(mirror_pad_node, "mode", "mode", &fused_conv);
CopyNodeAttr(conv_node, "T", "T", &fused_conv);
CopyNodeAttr(conv_node, "padding", "padding", &fused_conv);
CopyNodeAttr(conv_node, "strides", "strides", &fused_conv);
new_nodes->push_back(fused_conv);
return OkStatus();
},
{}, &replaced_graph_def));
*output_graph_def = replaced_graph_def;
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("fuse_resize_pad_and_conv", FuseResizePadAndConv);
REGISTER_GRAPH_TRANSFORM("fuse_resize_and_conv", FuseResizeAndConv);
REGISTER_GRAPH_TRANSFORM("fuse_pad_and_conv", FusePadAndConv);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status FuseResizePadAndConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status FuseResizeAndConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status FusePadAndConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
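// Each test builds a small reference graph, evaluates it, applies the fusion
// transform, re-evaluates the rewritten graph, and checks both that the
// outputs match numerically and that the fused-away ops are gone.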
class FuseConvolutionsTest : public ::testing::Test {
protected:
void TestFuseResizePadAndConv() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 2, 3, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Output resize_op = ResizeBilinear(root.WithOpName("resize_op"), input_op,
Const(root.WithOpName("size"), {12, 4}),
ResizeBilinear::AlignCorners(false));
Tensor pad_dims_data(DT_INT32, TensorShape({4, 2}));
test::FillValues<int32>(&pad_dims_data, {0, 0, 1, 1, 2, 2, 0, 0});
Output pad_dims_op = Const(root.WithOpName("pad_dims_op"),
Input::Initializer(pad_dims_data));
Output pad_op =
MirrorPad(root.WithOpName("pad_op"), resize_op, pad_dims_op, "REFLECT");
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("output"), pad_op, weights_op,
{1, 1, 1, 1}, "VALID");
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FuseResizePadAndConv(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("Conv2D", node.op());
EXPECT_NE("MirrorPad", node.op());
EXPECT_NE("ResizeBilinear", node.op());
}
}
void TestFuseResizeAndConv() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 2, 3, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Output resize_op = ResizeBilinear(root.WithOpName("resize_op"), input_op,
Const(root.WithOpName("size"), {12, 4}),
ResizeBilinear::AlignCorners(false));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("output"), resize_op, weights_op,
{1, 1, 1, 1}, "VALID");
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FuseResizeAndConv(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("Conv2D", node.op());
EXPECT_NE("ResizeBilinear", node.op());
}
}
void TestFusePadAndConv() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 2, 3, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor pad_dims_data(DT_INT32, TensorShape({4, 2}));
test::FillValues<int32>(&pad_dims_data, {0, 0, 1, 1, 2, 2, 0, 0});
Output pad_dims_op = Const(root.WithOpName("pad_dims_op"),
Input::Initializer(pad_dims_data));
Output pad_op =
MirrorPad(root.WithOpName("pad_op"), input_op, pad_dims_op, "REFLECT");
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("output"), pad_op, weights_op,
{1, 1, 1, 1}, "VALID");
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(
FusePadAndConv(original_graph_def, {{}, {"output"}}, &fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("Conv2D", node.op());
EXPECT_NE("MirrorPad", node.op());
}
}
};
TEST_F(FuseConvolutionsTest, TestFuseResizePadAndConv) {
TestFuseResizePadAndConv();
}
TEST_F(FuseConvolutionsTest, TestFuseResizeAndConv) { TestFuseResizeAndConv(); }
TEST_F(FuseConvolutionsTest, TestFusePadAndConv) { TestFusePadAndConv(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fuse_convolutions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fuse_convolutions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
518097c4-f474-42d5-b007-433b27b9c447 | cpp | google/quiche | chunked_buffer | quiche/http2/adapter/chunked_buffer.cc | quiche/http2/adapter/chunked_buffer_test.cc | #include "quiche/http2/adapter/chunked_buffer.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
namespace http2 {
namespace adapter {
namespace {
constexpr size_t kKilobyte = 1024;
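// Rounds n up to the nearest multiple of 1024; the bit trick relies on
// kKilobyte being a power of two.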
size_t RoundUpToNearestKilobyte(size_t n) {
return ((n - 1) | (kKilobyte - 1)) + 1;
}
}
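// Copies as much of `data` as fits into the free tail of the last chunk, then
// allocates a fresh chunk (rounded up to a whole kilobyte) for the remainder.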
void ChunkedBuffer::Append(absl::string_view data) {
const size_t to_copy = std::min(TailBytesFree(), data.size());
if (to_copy > 0) {
chunks_.back().AppendSuffix(data.substr(0, to_copy));
data.remove_prefix(to_copy);
}
EnsureTailBytesFree(data.size());
chunks_.back().AppendSuffix(data);
}
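// If `data` fits in the free tail of the last chunk it is copied there;
// otherwise any fully consumed leading chunks are dropped and the buffer is
// adopted directly, without copying.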
void ChunkedBuffer::Append(std::unique_ptr<char[]> data, size_t size) {
if (TailBytesFree() >= size) {
Chunk& c = chunks_.back();
c.AppendSuffix(absl::string_view(data.get(), size));
return;
}
while (!chunks_.empty() && chunks_.front().Empty()) {
chunks_.pop_front();
}
absl::string_view v = {data.get(), size};
chunks_.push_back({std::move(data), size, v});
}
absl::string_view ChunkedBuffer::GetPrefix() const {
if (chunks_.empty()) {
return "";
}
return chunks_.front().live;
}
std::vector<absl::string_view> ChunkedBuffer::Read() const {
std::vector<absl::string_view> result;
result.reserve(chunks_.size());
for (const Chunk& c : chunks_) {
result.push_back(c.live);
}
return result;
}
void ChunkedBuffer::RemovePrefix(size_t n) {
while (!Empty() && n > 0) {
Chunk& c = chunks_.front();
const size_t to_remove = std::min(n, c.live.size());
c.RemovePrefix(to_remove);
n -= to_remove;
if (c.Empty()) {
TrimFirstChunk();
}
}
}
bool ChunkedBuffer::Empty() const {
return chunks_.empty() ||
(chunks_.size() == 1 && chunks_.front().live.empty());
}
void ChunkedBuffer::Chunk::RemovePrefix(size_t n) {
QUICHE_DCHECK_GE(live.size(), n);
live.remove_prefix(n);
}
void ChunkedBuffer::Chunk::AppendSuffix(absl::string_view to_append) {
QUICHE_DCHECK_GE(TailBytesFree(), to_append.size());
if (live.empty()) {
std::copy(to_append.begin(), to_append.end(), data.get());
live = absl::string_view(data.get(), to_append.size());
} else {
std::copy(to_append.begin(), to_append.end(),
const_cast<char*>(live.data()) + live.size());
live = absl::string_view(live.data(), live.size() + to_append.size());
}
}
size_t ChunkedBuffer::TailBytesFree() const {
if (chunks_.empty()) {
return 0;
}
return chunks_.back().TailBytesFree();
}
void ChunkedBuffer::EnsureTailBytesFree(size_t n) {
if (TailBytesFree() >= n) {
return;
}
const size_t to_allocate = RoundUpToNearestKilobyte(n);
auto data = std::unique_ptr<char[]>(new char[to_allocate]);
chunks_.push_back({std::move(data), to_allocate, ""});
}
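// Drops the first chunk once it is fully consumed, except that a lone
// default-sized chunk is kept so the buffer can be reused without
// reallocating.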
void ChunkedBuffer::TrimFirstChunk() {
if (chunks_.empty() ||
(chunks_.size() == 1 && chunks_.front().size == kDefaultChunkSize)) {
return;
}
chunks_.pop_front();
}
}
} | #include "quiche/http2/adapter/chunked_buffer.h"
#include <algorithm>
#include <initializer_list>
#include <memory>
#include <utility>
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace {
constexpr absl::string_view kLoremIpsum =
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod "
"tempor incididunt ut labore et dolore magna aliqua.";
struct DataAndSize {
std::unique_ptr<char[]> data;
size_t size;
};
DataAndSize MakeDataAndSize(absl::string_view source) {
auto data = std::unique_ptr<char[]>(new char[source.size()]);
std::copy(source.begin(), source.end(), data.get());
return {std::move(data), source.size()};
}
TEST(ChunkedBufferTest, Empty) {
ChunkedBuffer buffer;
EXPECT_TRUE(buffer.Empty());
buffer.Append("some data");
EXPECT_FALSE(buffer.Empty());
buffer.RemovePrefix(9);
EXPECT_TRUE(buffer.Empty());
}
TEST(ChunkedBufferTest, ReusedAfterEmptied) {
ChunkedBuffer buffer;
buffer.Append("some data");
buffer.RemovePrefix(9);
buffer.Append("different data");
EXPECT_EQ("different data", buffer.GetPrefix());
}
TEST(ChunkedBufferTest, LargeAppendAfterEmptied) {
ChunkedBuffer buffer;
buffer.Append("some data");
EXPECT_THAT(buffer.GetPrefix(), testing::StartsWith("some data"));
buffer.RemovePrefix(9);
auto more_data =
MakeDataAndSize(absl::StrCat("different data", std::string(2048, 'x')));
buffer.Append(std::move(more_data.data), more_data.size);
EXPECT_THAT(buffer.GetPrefix(), testing::StartsWith("different data"));
}
TEST(ChunkedBufferTest, LargeAppends) {
ChunkedBuffer buffer;
buffer.Append(std::string(500, 'a'));
buffer.Append(std::string(2000, 'b'));
buffer.Append(std::string(10, 'c'));
auto more_data = MakeDataAndSize(std::string(4490, 'd'));
buffer.Append(std::move(more_data.data), more_data.size);
EXPECT_EQ(500 + 2000 + 10 + 4490, absl::StrJoin(buffer.Read(), "").size());
}
TEST(ChunkedBufferTest, RemovePartialPrefix) {
ChunkedBuffer buffer;
auto data_and_size = MakeDataAndSize(kLoremIpsum);
buffer.Append(std::move(data_and_size.data), data_and_size.size);
buffer.RemovePrefix(6);
EXPECT_THAT(buffer.GetPrefix(), testing::StartsWith("ipsum"));
buffer.RemovePrefix(20);
EXPECT_THAT(buffer.GetPrefix(), testing::StartsWith(", consectetur"));
buffer.Append(" Anday igpay atinlay!");
const std::initializer_list<absl::string_view> parts = {
kLoremIpsum.substr(26), " Anday igpay atinlay!"};
EXPECT_EQ(absl::StrJoin(parts, ""), absl::StrJoin(buffer.Read(), ""));
}
TEST(ChunkedBufferTest, DifferentAppends) {
ChunkedBuffer buffer;
buffer.Append("Lorem ipsum");
auto more_data = MakeDataAndSize(" dolor sit amet, ");
buffer.Append(std::move(more_data.data), more_data.size);
buffer.Append("consectetur adipiscing elit, ");
more_data = MakeDataAndSize("sed do eiusmod tempor incididunt ut ");
buffer.Append(std::move(more_data.data), more_data.size);
buffer.Append("labore et dolore magna aliqua.");
EXPECT_EQ(kLoremIpsum, absl::StrJoin(buffer.Read(), ""));
buffer.RemovePrefix(kLoremIpsum.size());
EXPECT_TRUE(buffer.Empty());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/chunked_buffer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/chunked_buffer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
3e2c2113-a259-4ad9-9ee0-e4be27194ce0 | cpp | abseil/abseil-cpp | bind_front | absl/functional/bind_front.h | absl/functional/bind_front_test.cc | #ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_
#define ABSL_FUNCTIONAL_BIND_FRONT_H_
#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
#include <functional>
#endif
#include <utility>
#include "absl/functional/internal/front_binder.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
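// absl::bind_front is an alias for std::bind_front when the standard library
// provides it (C++20); otherwise a fallback with the same call semantics is
// used. For example, absl::bind_front(F, a)(b, c) invokes F(a, b, c).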
#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
using std::bind_front;
#else
template <class F, class... BoundArgs>
constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
F&& func, BoundArgs&&... args) {
return functional_internal::bind_front_t<F, BoundArgs...>(
absl::in_place, std::forward<F>(func), std::forward<BoundArgs>(args)...);
}
#endif
ABSL_NAMESPACE_END
}
#endif | #include "absl/functional/bind_front.h"
#include <stddef.h>
#include <functional>
#include <memory>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/memory/memory.h"
namespace {
char CharAt(const char* s, size_t index) { return s[index]; }
TEST(BindTest, Basics) {
EXPECT_EQ('C', absl::bind_front(CharAt)("ABC", 2));
EXPECT_EQ('C', absl::bind_front(CharAt, "ABC")(2));
EXPECT_EQ('C', absl::bind_front(CharAt, "ABC", 2)());
}
TEST(BindTest, Lambda) {
auto lambda = [](int x, int y, int z) { return x + y + z; };
EXPECT_EQ(6, absl::bind_front(lambda)(1, 2, 3));
EXPECT_EQ(6, absl::bind_front(lambda, 1)(2, 3));
EXPECT_EQ(6, absl::bind_front(lambda, 1, 2)(3));
EXPECT_EQ(6, absl::bind_front(lambda, 1, 2, 3)());
}
struct Functor {
std::string operator()() & { return "&"; }
std::string operator()() const& { return "const&"; }
std::string operator()() && { return "&&"; }
std::string operator()() const&& { return "const&&"; }
};
TEST(BindTest, PerfectForwardingOfBoundArgs) {
auto f = absl::bind_front(Functor());
const auto& cf = f;
EXPECT_EQ("&", f());
EXPECT_EQ("const&", cf());
EXPECT_EQ("&&", std::move(f)());
EXPECT_EQ("const&&", std::move(cf)());
}
struct ArgDescribe {
std::string operator()(int&) const { return "&"; }
std::string operator()(const int&) const { return "const&"; }
std::string operator()(int&&) const { return "&&"; }
std::string operator()(const int&&) const { return "const&&"; }
};
TEST(BindTest, PerfectForwardingOfFreeArgs) {
ArgDescribe f;
int i;
EXPECT_EQ("&", absl::bind_front(f)(static_cast<int&>(i)));
EXPECT_EQ("const&", absl::bind_front(f)(static_cast<const int&>(i)));
EXPECT_EQ("&&", absl::bind_front(f)(static_cast<int&&>(i)));
EXPECT_EQ("const&&", absl::bind_front(f)(static_cast<const int&&>(i)));
}
struct NonCopyableFunctor {
NonCopyableFunctor() = default;
NonCopyableFunctor(const NonCopyableFunctor&) = delete;
NonCopyableFunctor& operator=(const NonCopyableFunctor&) = delete;
const NonCopyableFunctor* operator()() const { return this; }
};
TEST(BindTest, RefToFunctor) {
NonCopyableFunctor ncf;
auto bound_ncf = absl::bind_front(std::ref(ncf));
auto bound_ncf_copy = bound_ncf;
EXPECT_EQ(&ncf, bound_ncf_copy());
}
struct Struct {
std::string value;
};
TEST(BindTest, StoreByCopy) {
Struct s = {"hello"};
auto f = absl::bind_front(&Struct::value, s);
auto g = f;
EXPECT_EQ("hello", f());
EXPECT_EQ("hello", g());
EXPECT_NE(&s.value, &f());
EXPECT_NE(&s.value, &g());
EXPECT_NE(&g(), &f());
}
struct NonCopyable {
explicit NonCopyable(const std::string& s) : value(s) {}
NonCopyable(const NonCopyable&) = delete;
NonCopyable& operator=(const NonCopyable&) = delete;
std::string value;
};
const std::string& GetNonCopyableValue(const NonCopyable& n) { return n.value; }
TEST(BindTest, StoreByRef) {
NonCopyable s("hello");
auto f = absl::bind_front(&GetNonCopyableValue, std::ref(s));
EXPECT_EQ("hello", f());
EXPECT_EQ(&s.value, &f());
auto g = std::move(f);
EXPECT_EQ("hello", g());
EXPECT_EQ(&s.value, &g());
s.value = "goodbye";
EXPECT_EQ("goodbye", g());
}
TEST(BindTest, StoreByCRef) {
NonCopyable s("hello");
auto f = absl::bind_front(&GetNonCopyableValue, std::cref(s));
EXPECT_EQ("hello", f());
EXPECT_EQ(&s.value, &f());
auto g = std::move(f);
EXPECT_EQ("hello", g());
EXPECT_EQ(&s.value, &g());
s.value = "goodbye";
EXPECT_EQ("goodbye", g());
}
const std::string& GetNonCopyableValueByWrapper(
std::reference_wrapper<NonCopyable> n) {
return n.get().value;
}
TEST(BindTest, StoreByRefInvokeByWrapper) {
NonCopyable s("hello");
auto f = absl::bind_front(GetNonCopyableValueByWrapper, std::ref(s));
EXPECT_EQ("hello", f());
EXPECT_EQ(&s.value, &f());
auto g = std::move(f);
EXPECT_EQ("hello", g());
EXPECT_EQ(&s.value, &g());
s.value = "goodbye";
EXPECT_EQ("goodbye", g());
}
TEST(BindTest, StoreByPointer) {
NonCopyable s("hello");
auto f = absl::bind_front(&NonCopyable::value, &s);
EXPECT_EQ("hello", f());
EXPECT_EQ(&s.value, &f());
auto g = std::move(f);
EXPECT_EQ("hello", g());
EXPECT_EQ(&s.value, &g());
}
int Sink(std::unique_ptr<int> p) {
return *p;
}
std::unique_ptr<int> Factory(int n) { return absl::make_unique<int>(n); }
TEST(BindTest, NonCopyableArg) {
EXPECT_EQ(42, absl::bind_front(Sink)(absl::make_unique<int>(42)));
EXPECT_EQ(42, absl::bind_front(Sink, absl::make_unique<int>(42))());
}
TEST(BindTest, NonCopyableResult) {
EXPECT_THAT(absl::bind_front(Factory)(42), ::testing::Pointee(42));
EXPECT_THAT(absl::bind_front(Factory, 42)(), ::testing::Pointee(42));
}
template <class T>
struct FalseCopyable {
FalseCopyable() {}
FalseCopyable(const FalseCopyable& other) : m(other.m) {}
FalseCopyable(FalseCopyable&& other) : m(std::move(other.m)) {}
T m;
};
int GetMember(FalseCopyable<std::unique_ptr<int>> x) { return *x.m; }
TEST(BindTest, WrappedMoveOnly) {
FalseCopyable<std::unique_ptr<int>> x;
x.m = absl::make_unique<int>(42);
auto f = absl::bind_front(&GetMember, std::move(x));
EXPECT_EQ(42, std::move(f)());
}
int Plus(int a, int b) { return a + b; }
TEST(BindTest, ConstExpr) {
constexpr auto f = absl::bind_front(CharAt);
EXPECT_EQ(f("ABC", 1), 'B');
static constexpr int five = 5;
constexpr auto plus5 = absl::bind_front(Plus, five);
EXPECT_EQ(plus5(1), 6);
#if !(defined(_MSC_VER) && _MSC_VER < 1910)
static constexpr char data[] = "DEF";
constexpr auto g = absl::bind_front(CharAt, data);
EXPECT_EQ(g(1), 'E');
#endif
}
struct ManglingCall {
int operator()(int, double, std::string) const { return 0; }
};
TEST(BindTest, Mangling) {
absl::bind_front(ManglingCall{}, 1, 3.3)("A");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/functional/bind_front.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/functional/bind_front_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
4318bf3e-f2f8-4868-a881-8c5e2a50ffeb | cpp | tensorflow/tensorflow | repository | tensorflow/core/profiler/convert/repository.cc | tensorflow/core/profiler/convert/repository_test.cc | #include "tensorflow/core/profiler/convert/repository.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "xla/tsl/profiler/utils/file_system_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
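// Derives the host name from an XSpace file path, e.g.
// "log/plugins/profile/hostname0.xplane.pb" -> "hostname0".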
std::string GetHostnameByPath(absl::string_view xspace_path) {
std::string_view file_name = tensorflow::io::Basename(xspace_path);
absl::ConsumeSuffix(&file_name, ".xplane.pb");
return std::string(file_name);
}
}
absl::StatusOr<SessionSnapshot> SessionSnapshot::Create(
std::vector<std::string> xspace_paths,
std::optional<std::vector<std::unique_ptr<XSpace>>> xspaces) {
if (xspace_paths.empty()) {
return errors::InvalidArgument("Can not find XSpace path.");
}
if (xspaces.has_value()) {
if (xspaces->size() != xspace_paths.size()) {
return errors::InvalidArgument(
"The size of the XSpace paths: ", xspace_paths.size(),
" is not equal ",
"to the size of the XSpace proto: ", xspaces->size());
}
for (size_t i = 0; i < xspace_paths.size(); ++i) {
auto host_name = GetHostnameByPath(xspace_paths.at(i));
if (xspaces->at(i)->hostnames_size() > 0 && !host_name.empty()) {
if (!absl::StrContains(host_name, xspaces->at(i)->hostnames(0))) {
          return errors::InvalidArgument(
              "The hostname of xspace path and preloaded xspace don't match "
              "at index: ",
              i, ". \nThe host name of xspace path is ", host_name,
              " but the host name of preloaded xspace is ",
              xspaces->at(i)->hostnames(0), ".");
}
}
}
}
return SessionSnapshot(std::move(xspace_paths), std::move(xspaces));
}
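// Note: when XSpaces were preloaded, GetXSpace moves the proto out, so each
// index can be fetched at most once; later calls hit the null check below.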
absl::StatusOr<std::unique_ptr<XSpace>> SessionSnapshot::GetXSpace(
size_t index) const {
if (index >= xspace_paths_.size()) {
return errors::InvalidArgument("Can not get the ", index,
"th XSpace. The total number of XSpace is ",
xspace_paths_.size());
}
if (xspaces_.has_value()) {
if (xspaces_->at(index) == nullptr) {
      return errors::Internal("XSpace at index ", index,
                              " was already moved out or is missing.");
}
return std::move(xspaces_->at(index));
}
auto xspace_from_file = std::make_unique<XSpace>();
TF_RETURN_IF_ERROR(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
xspace_paths_.at(index),
xspace_from_file.get()));
return xspace_from_file;
}
absl::StatusOr<std::unique_ptr<XSpace>> SessionSnapshot::GetXSpaceByName(
absl::string_view name) const {
if (auto it = hostname_map_.find(name); it != hostname_map_.end()) {
return GetXSpace(it->second);
}
return errors::InvalidArgument("Can not find the XSpace by name: ", name,
". The total number of XSpace is ",
xspace_paths_.size());
}
std::string SessionSnapshot::GetHostname(size_t index) const {
return GetHostnameByPath(xspace_paths_.at(index));
}
std::optional<std::string> SessionSnapshot::GetFilePath(
absl::string_view toolname, absl::string_view hostname) const {
if (!has_accessible_run_dir_) return std::nullopt;
std::string file_name = "";
if (toolname == "trace_viewer@")
file_name = absl::StrCat(hostname, ".", "SSTABLE");
if (!file_name.empty())
return tensorflow::io::JoinPath(session_run_dir_, file_name);
return std::nullopt;
}
absl::StatusOr<std::string> SessionSnapshot::GetHostDataFileName(
const StoredDataType data_type, const std::string host) const {
for (const auto& format : *kHostDataSuffixes) {
if (data_type == format.first) return absl::StrCat(host, format.second);
}
  return absl::InternalError(
      absl::StrCat("Unknown StoredDataType: ", static_cast<int>(data_type)));
}
absl::StatusOr<std::optional<std::string>> SessionSnapshot::GetHostDataFilePath(
const StoredDataType data_type, const std::string host) const {
std::vector<std::string> results;
TF_RETURN_IF_ERROR(::tsl::Env::Default()->GetChildren(
std::string(GetSessionRunDir()), &results));
TF_ASSIGN_OR_RETURN(std::string filename,
GetHostDataFileName(data_type, host));
for (const std::string& path : results) {
if (absl::EndsWith(path, filename)) {
return ::tsl::profiler::ProfilerJoinPath(GetSessionRunDir(), filename);
}
}
return std::nullopt;
}
absl::StatusOr<std::pair<bool, std::string>> SessionSnapshot::HasCacheFile(
const StoredDataType data_type) const {
std::optional<std::string> filepath;
TF_ASSIGN_OR_RETURN(filepath,
GetHostDataFilePath(data_type, kNoHostIdentifier));
if (filepath) {
return std::pair<bool, std::string>(true, std::string());
}
TF_ASSIGN_OR_RETURN(filepath,
GetHostDataFilePath(data_type, kAllHostsIdentifier));
if (filepath) {
return std::pair<bool, std::string>(true, filepath.value());
}
return std::pair<bool, std::string>(false, std::string());
}
}
} | #include "tensorflow/core/profiler/convert/repository.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/errors.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::Eq;
TEST(Repository, GetHostName) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb",
"log/plugins/profile/hostname1.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
EXPECT_THAT(session_snapshot_or.value().GetHostname(0), Eq("hostname0"));
EXPECT_THAT(session_snapshot_or.value().GetHostname(1), Eq("hostname1"));
EXPECT_TRUE(session_snapshot_or.value().HasAccessibleRunDir());
}
TEST(Repository, GetHostNameWithPeriods) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/127.0.0.1_6009.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
EXPECT_THAT(session_snapshot_or.value().GetHostname(0), Eq("127.0.0.1_6009"));
EXPECT_TRUE(session_snapshot_or.value().HasAccessibleRunDir());
}
TEST(Repository, GetSpaceByHostName) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space1 = std::make_unique<XSpace>();
*(space1->add_hostnames()) = "hostname1";
xspaces.push_back(std::move(space1));
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname1.xplane.pb",
"log/plugins/profile/hostname0.xplane.pb"},
std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
auto xspace0_or = session_snapshot_or.value().GetXSpaceByName("hostname0");
TF_CHECK_OK(xspace0_or.status());
auto xspace1_or = session_snapshot_or.value().GetXSpaceByName("hostname1");
EXPECT_FALSE(session_snapshot_or.value().HasAccessibleRunDir());
TF_CHECK_OK(xspace1_or.status());
EXPECT_THAT(xspace0_or.value()->hostnames(0), Eq("hostname0"));
EXPECT_THAT(xspace1_or.value()->hostnames(0), Eq("hostname1"));
}
TEST(Repository, GetSSTableFile) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
auto sstable_path =
session_snapshot_or.value().GetFilePath("trace_viewer@", "hostname0");
auto not_found_path =
session_snapshot_or.value().GetFilePath("memory_viewer", "hostname0");
EXPECT_THAT(sstable_path, Eq("log/plugins/profile/hostname0.SSTABLE"));
EXPECT_THAT(not_found_path, Eq(std::nullopt));
}
TEST(Repository, GetSSTableFileWithXSpace) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or = SessionSnapshot::Create(
{"log/plugins/profile/hostname0.xplane.pb"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
auto file_path_init_by_xspace =
session_snapshot_or.value().GetFilePath("trace_viewer@", "hostname0");
EXPECT_THAT(file_path_init_by_xspace, Eq(std::nullopt));
}
TEST(Repository, MismatchedXSpaceAndPath) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space1 = std::make_unique<XSpace>();
*(space1->add_hostnames()) = "hostname1";
xspaces.push_back(std::move(space1));
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb",
"log/plugins/profile/hostname1.xplane.pb"},
std::move(xspaces));
auto error =
R"(The hostname of xspace path and preloaded xpace don't match at index: 0.
The host name of xpace path is hostname0 but the host name of preloaded xpace is hostname1.)";
EXPECT_THAT(session_snapshot_or.status(), Eq(errors::InvalidArgument(error)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/repository.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/repository_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9c3fc5bd-4746-4545-bf0f-040ceafb6a90 | cpp | google/cel-cpp | flat_expr_builder_extensions | eval/compiler/flat_expr_builder_extensions.cc | eval/compiler/flat_expr_builder_extensions_test.cc | #include "eval/compiler/flat_expr_builder_extensions.h"
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "base/ast_internal/expr.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
namespace google::api::expr::runtime {
namespace {
using Subexpression = google::api::expr::runtime::ProgramBuilder::Subexpression;
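// If `parent` has exactly one child and that child already holds a recursive
// (direct-step) program, hoist the child's program into the parent so the
// recursive representation propagates up the tree.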
void MaybeReassignChildRecursiveProgram(Subexpression* parent) {
if (parent->IsFlattened() || parent->IsRecursive()) {
return;
}
if (parent->elements().size() != 1) {
return;
}
auto* child_alternative =
absl::get_if<std::unique_ptr<Subexpression>>(&parent->elements()[0]);
if (child_alternative == nullptr) {
return;
}
auto& child_subexpression = *child_alternative;
if (!child_subexpression->IsRecursive()) {
return;
}
auto child_program = child_subexpression->ExtractRecursiveProgram();
parent->set_recursive_program(std::move(child_program.step),
child_program.depth);
}
}
Subexpression::Subexpression(const cel::ast_internal::Expr* self,
ProgramBuilder* owner)
: self_(self), parent_(nullptr), subprogram_map_(owner->subprogram_map_) {}
size_t Subexpression::ComputeSize() const {
if (IsFlattened()) {
return flattened_elements().size();
} else if (IsRecursive()) {
return 1;
}
std::vector<const Subexpression*> to_expand{this};
size_t size = 0;
while (!to_expand.empty()) {
const auto* expr = to_expand.back();
to_expand.pop_back();
if (expr->IsFlattened()) {
size += expr->flattened_elements().size();
continue;
} else if (expr->IsRecursive()) {
size += 1;
continue;
}
for (const auto& elem : expr->elements()) {
if (auto* child = absl::get_if<std::unique_ptr<Subexpression>>(&elem);
child != nullptr) {
to_expand.push_back(child->get());
} else {
size += 1;
}
}
}
return size;
}
absl::optional<int> Subexpression::RecursiveDependencyDepth() const {
auto* tree = absl::get_if<TreePlan>(&program_);
int depth = 0;
if (tree == nullptr) {
return absl::nullopt;
}
for (const auto& element : *tree) {
auto* subexpression =
absl::get_if<std::unique_ptr<Subexpression>>(&element);
if (subexpression == nullptr) {
return absl::nullopt;
}
if (!(*subexpression)->IsRecursive()) {
return absl::nullopt;
}
depth = std::max(depth, (*subexpression)->recursive_program().depth);
}
return depth;
}
std::vector<std::unique_ptr<DirectExpressionStep>>
Subexpression::ExtractRecursiveDependencies() const {
auto* tree = absl::get_if<TreePlan>(&program_);
std::vector<std::unique_ptr<DirectExpressionStep>> dependencies;
if (tree == nullptr) {
return {};
}
for (const auto& element : *tree) {
auto* subexpression =
absl::get_if<std::unique_ptr<Subexpression>>(&element);
if (subexpression == nullptr) {
return {};
}
if (!(*subexpression)->IsRecursive()) {
return {};
}
dependencies.push_back((*subexpression)->ExtractRecursiveProgram().step);
}
return dependencies;
}
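// On destruction, unregister this subexpression from the shared subprogram
// map if the map is still alive and still points at this instance.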
Subexpression::~Subexpression() {
auto map_ptr = subprogram_map_.lock();
if (map_ptr == nullptr) {
return;
}
auto it = map_ptr->find(self_);
if (it != map_ptr->end() && it->second == this) {
map_ptr->erase(it);
}
}
std::unique_ptr<Subexpression> Subexpression::ExtractChild(
Subexpression* child) {
if (IsFlattened()) {
return nullptr;
}
for (auto iter = elements().begin(); iter != elements().end(); ++iter) {
Subexpression::Element& element = *iter;
if (!absl::holds_alternative<std::unique_ptr<Subexpression>>(element)) {
continue;
}
auto& subexpression_owner =
absl::get<std::unique_ptr<Subexpression>>(element);
if (subexpression_owner.get() != child) {
continue;
}
std::unique_ptr<Subexpression> result = std::move(subexpression_owner);
elements().erase(iter);
return result;
}
return nullptr;
}
int Subexpression::CalculateOffset(int base, int target) const {
ABSL_DCHECK(!IsFlattened());
ABSL_DCHECK(!IsRecursive());
ABSL_DCHECK_GE(base, 0);
ABSL_DCHECK_GE(target, 0);
ABSL_DCHECK_LE(base, elements().size());
ABSL_DCHECK_LE(target, elements().size());
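  // Backward jumps (target at or before base) are computed by swapping the
  // traversal bounds and negating the accumulated subprogram size.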
int sign = 1;
if (target <= base) {
int tmp = base;
base = target - 1;
target = tmp + 1;
sign = -1;
}
int sum = 0;
for (int i = base + 1; i < target; ++i) {
const auto& element = elements()[i];
if (auto* subexpr = absl::get_if<std::unique_ptr<Subexpression>>(&element);
subexpr != nullptr) {
sum += (*subexpr)->ComputeSize();
} else {
sum += 1;
}
}
return sign * sum;
}
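// Flattens the subexpression tree into a single linear instruction vector
// using an explicit stack; each Record's `offset` marks where to resume in a
// parent after one of its children has been expanded.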
void Subexpression::Flatten() {
struct Record {
Subexpression* subexpr;
size_t offset;
};
if (IsFlattened()) {
return;
}
std::vector<std::unique_ptr<const ExpressionStep>> flat;
std::vector<Record> flatten_stack;
flatten_stack.push_back({this, 0});
while (!flatten_stack.empty()) {
Record top = flatten_stack.back();
flatten_stack.pop_back();
size_t offset = top.offset;
auto* subexpr = top.subexpr;
if (subexpr->IsFlattened()) {
absl::c_move(subexpr->flattened_elements(), std::back_inserter(flat));
continue;
} else if (subexpr->IsRecursive()) {
flat.push_back(std::make_unique<WrappedDirectStep>(
std::move(subexpr->ExtractRecursiveProgram().step),
subexpr->self_->id()));
}
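    // ExtractRecursiveProgram() above resets the node to an empty element
    // list, so the element loop below is a no-op for recursive nodes.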
size_t size = subexpr->elements().size();
size_t i = offset;
for (; i < size; ++i) {
auto& element = subexpr->elements()[i];
if (auto* child = absl::get_if<std::unique_ptr<Subexpression>>(&element);
child != nullptr) {
flatten_stack.push_back({subexpr, i + 1});
flatten_stack.push_back({child->get(), 0});
break;
} else if (auto* step =
absl::get_if<std::unique_ptr<ExpressionStep>>(&element);
step != nullptr) {
flat.push_back(std::move(*step));
}
}
if (i >= size && subexpr != this) {
subexpr->program_.emplace<std::vector<Subexpression::Element>>();
}
}
program_ = std::move(flat);
}
Subexpression::RecursiveProgram Subexpression::ExtractRecursiveProgram() {
ABSL_DCHECK(IsRecursive());
auto result = std::move(absl::get<RecursiveProgram>(program_));
program_.emplace<std::vector<Subexpression::Element>>();
return result;
}
bool Subexpression::ExtractTo(
std::vector<std::unique_ptr<const ExpressionStep>>& out) {
if (!IsFlattened()) {
return false;
}
out.reserve(out.size() + flattened_elements().size());
absl::c_move(flattened_elements(), std::back_inserter(out));
program_.emplace<std::vector<Element>>();
return true;
}
std::vector<std::unique_ptr<const ExpressionStep>>
ProgramBuilder::FlattenSubexpression(std::unique_ptr<Subexpression> expr) {
std::vector<std::unique_ptr<const ExpressionStep>> out;
if (!expr) {
return out;
}
expr->Flatten();
expr->ExtractTo(out);
return out;
}
ProgramBuilder::ProgramBuilder()
: root_(nullptr),
current_(nullptr),
subprogram_map_(std::make_shared<SubprogramMap>()) {}
ExecutionPath ProgramBuilder::FlattenMain() {
auto out = FlattenSubexpression(std::move(root_));
return out;
}
std::vector<ExecutionPath> ProgramBuilder::FlattenSubexpressions() {
std::vector<ExecutionPath> out;
out.reserve(extracted_subexpressions_.size());
for (auto& subexpression : extracted_subexpressions_) {
out.push_back(FlattenSubexpression(std::move(subexpression)));
}
extracted_subexpressions_.clear();
return out;
}
absl::Nullable<Subexpression*> ProgramBuilder::EnterSubexpression(
const cel::ast_internal::Expr* expr) {
std::unique_ptr<Subexpression> subexpr = MakeSubexpression(expr);
auto* result = subexpr.get();
if (current_ == nullptr) {
root_ = std::move(subexpr);
current_ = result;
return result;
}
current_->AddSubexpression(std::move(subexpr));
result->parent_ = current_->self_;
current_ = result;
return result;
}
absl::Nullable<Subexpression*> ProgramBuilder::ExitSubexpression(
const cel::ast_internal::Expr* expr) {
ABSL_DCHECK(expr == current_->self_);
ABSL_DCHECK(GetSubexpression(expr) == current_);
MaybeReassignChildRecursiveProgram(current_);
Subexpression* result = GetSubexpression(current_->parent_);
ABSL_DCHECK(result != nullptr || current_ == root_.get());
current_ = result;
return result;
}
absl::Nullable<Subexpression*> ProgramBuilder::GetSubexpression(
const cel::ast_internal::Expr* expr) {
auto it = subprogram_map_->find(expr);
if (it == subprogram_map_->end()) {
return nullptr;
}
return it->second;
}
void ProgramBuilder::AddStep(std::unique_ptr<ExpressionStep> step) {
if (current_ == nullptr) {
return;
}
current_->AddStep(std::move(step));
}
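// Detaches the subexpression rooted at `expr` from its parent and stores it
// in extracted_subexpressions_, returning its index there, or -1 if the node
// or its parent is untracked or the node has already been flattened.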
int ProgramBuilder::ExtractSubexpression(const cel::ast_internal::Expr* expr) {
auto it = subprogram_map_->find(expr);
if (it == subprogram_map_->end()) {
return -1;
}
auto* subexpression = it->second;
auto parent_it = subprogram_map_->find(subexpression->parent_);
if (parent_it == subprogram_map_->end()) {
return -1;
}
auto* parent = parent_it->second;
std::unique_ptr<Subexpression> subexpression_owner =
parent->ExtractChild(subexpression);
if (subexpression_owner == nullptr) {
return -1;
}
extracted_subexpressions_.push_back(std::move(subexpression_owner));
return extracted_subexpressions_.size() - 1;
}
std::unique_ptr<Subexpression> ProgramBuilder::MakeSubexpression(
const cel::ast_internal::Expr* expr) {
auto* subexpr = new Subexpression(expr, this);
(*subprogram_map_)[expr] = subexpr;
return absl::WrapUnique(subexpr);
}
bool PlannerContext::IsSubplanInspectable(
const cel::ast_internal::Expr& node) const {
return program_builder_.GetSubexpression(&node) != nullptr;
}
ExecutionPathView PlannerContext::GetSubplan(
const cel::ast_internal::Expr& node) {
auto* subexpression = program_builder_.GetSubexpression(&node);
if (subexpression == nullptr) {
return ExecutionPathView();
}
subexpression->Flatten();
return subexpression->flattened_elements();
}
absl::StatusOr<ExecutionPath> PlannerContext::ExtractSubplan(
const cel::ast_internal::Expr& node) {
auto* subexpression = program_builder_.GetSubexpression(&node);
if (subexpression == nullptr) {
return absl::InternalError(
"attempted to update program step for untracked expr node");
}
subexpression->Flatten();
ExecutionPath out;
subexpression->ExtractTo(out);
return out;
}
absl::Status PlannerContext::ReplaceSubplan(const cel::ast_internal::Expr& node,
ExecutionPath path) {
auto* subexpression = program_builder_.GetSubexpression(&node);
if (subexpression == nullptr) {
return absl::InternalError(
"attempted to update program step for untracked expr node");
}
if (!subexpression->IsFlattened()) {
subexpression->Flatten();
}
subexpression->flattened_elements() = std::move(path);
return absl::OkStatus();
}
absl::Status PlannerContext::ReplaceSubplan(
const cel::ast_internal::Expr& node,
std::unique_ptr<DirectExpressionStep> step, int depth) {
auto* subexpression = program_builder_.GetSubexpression(&node);
if (subexpression == nullptr) {
return absl::InternalError(
"attempted to update program step for untracked expr node");
}
subexpression->set_recursive_program(std::move(step), depth);
return absl::OkStatus();
}
absl::Status PlannerContext::AddSubplanStep(
const cel::ast_internal::Expr& node, std::unique_ptr<ExpressionStep> step) {
auto* subexpression = program_builder_.GetSubexpression(&node);
if (subexpression == nullptr) {
return absl::InternalError(
"attempted to update program step for untracked expr node");
}
subexpression->AddStep(std::move(step));
return absl::OkStatus();
}
} | #include "eval/compiler/flat_expr_builder_extensions.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/ast_internal/expr.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/compiler/resolver.h"
#include "eval/eval/const_value_step.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/function_step.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "runtime/function_registry.h"
#include "runtime/internal/issue_collector.h"
#include "runtime/runtime_issue.h"
#include "runtime/runtime_options.h"
#include "runtime/type_registry.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::RuntimeIssue;
using ::cel::ast_internal::Expr;
using ::cel::runtime_internal::IssueCollector;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Optional;
using Subexpression = ProgramBuilder::Subexpression;
class PlannerContextTest : public testing::Test {
public:
PlannerContextTest()
: type_registry_(),
function_registry_(),
value_factory_(cel::MemoryManagerRef::ReferenceCounting(),
type_registry_.GetComposedTypeProvider()),
resolver_("", function_registry_, type_registry_, value_factory_,
type_registry_.resolveable_enums()),
issue_collector_(RuntimeIssue::Severity::kError) {}
protected:
cel::TypeRegistry type_registry_;
cel::FunctionRegistry function_registry_;
cel::RuntimeOptions options_;
cel::common_internal::LegacyValueManager value_factory_;
Resolver resolver_;
IssueCollector issue_collector_;
};
MATCHER_P(UniquePtrHolds, ptr, "") {
const auto& got = arg;
return ptr == got.get();
}
struct SimpleTreeSteps {
const ExpressionStep* a;
const ExpressionStep* b;
const ExpressionStep* c;
};
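// Builds the three-node program a(b, c) out of const-null steps and returns
// raw pointers to the steps so tests can check plan contents by identity.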
absl::StatusOr<SimpleTreeSteps> InitSimpleTree(
const Expr& a, const Expr& b, const Expr& c,
cel::ValueManager& value_factory, ProgramBuilder& program_builder) {
CEL_ASSIGN_OR_RETURN(auto a_step,
CreateConstValueStep(value_factory.GetNullValue(), -1));
CEL_ASSIGN_OR_RETURN(auto b_step,
CreateConstValueStep(value_factory.GetNullValue(), -1));
CEL_ASSIGN_OR_RETURN(auto c_step,
CreateConstValueStep(value_factory.GetNullValue(), -1));
SimpleTreeSteps result{a_step.get(), b_step.get(), c_step.get()};
program_builder.EnterSubexpression(&a);
program_builder.EnterSubexpression(&b);
program_builder.AddStep(std::move(b_step));
program_builder.ExitSubexpression(&b);
program_builder.EnterSubexpression(&c);
program_builder.AddStep(std::move(c_step));
program_builder.ExitSubexpression(&c);
program_builder.AddStep(std::move(a_step));
program_builder.ExitSubexpression(&a);
return result;
}
TEST_F(PlannerContextTest, GetPlan) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(
auto step_ptrs, InitSimpleTree(a, b, c, value_factory_, program_builder));
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
EXPECT_THAT(context.GetSubplan(b), ElementsAre(UniquePtrHolds(step_ptrs.b)));
EXPECT_THAT(context.GetSubplan(c), ElementsAre(UniquePtrHolds(step_ptrs.c)));
EXPECT_THAT(context.GetSubplan(a), ElementsAre(UniquePtrHolds(step_ptrs.b),
UniquePtrHolds(step_ptrs.c),
UniquePtrHolds(step_ptrs.a)));
Expr d;
EXPECT_FALSE(context.IsSubplanInspectable(d));
EXPECT_THAT(context.GetSubplan(d), IsEmpty());
}
TEST_F(PlannerContextTest, ReplacePlan) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(
auto step_ptrs, InitSimpleTree(a, b, c, value_factory_, program_builder));
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
EXPECT_THAT(context.GetSubplan(a), ElementsAre(UniquePtrHolds(step_ptrs.b),
UniquePtrHolds(step_ptrs.c),
UniquePtrHolds(step_ptrs.a)));
ExecutionPath new_a;
ASSERT_OK_AND_ASSIGN(auto new_a_step,
CreateConstValueStep(value_factory_.GetNullValue(), -1));
const ExpressionStep* new_a_step_ptr = new_a_step.get();
new_a.push_back(std::move(new_a_step));
ASSERT_OK(context.ReplaceSubplan(a, std::move(new_a)));
EXPECT_THAT(context.GetSubplan(a),
ElementsAre(UniquePtrHolds(new_a_step_ptr)));
EXPECT_THAT(context.GetSubplan(b), IsEmpty());
}
TEST_F(PlannerContextTest, ExtractPlan) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(auto plan_steps, InitSimpleTree(a, b, c, value_factory_,
program_builder));
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
EXPECT_TRUE(context.IsSubplanInspectable(a));
EXPECT_TRUE(context.IsSubplanInspectable(b));
ASSERT_OK_AND_ASSIGN(ExecutionPath extracted, context.ExtractSubplan(b));
EXPECT_THAT(extracted, ElementsAre(UniquePtrHolds(plan_steps.b)));
}
TEST_F(PlannerContextTest, ExtractFailsOnReplacedNode) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK(InitSimpleTree(a, b, c, value_factory_, program_builder).status());
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
ASSERT_OK(context.ReplaceSubplan(a, {}));
EXPECT_THAT(context.ExtractSubplan(b), StatusIs(absl::StatusCode::kInternal));
}
TEST_F(PlannerContextTest, ReplacePlanUpdatesParent) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(auto plan_steps, InitSimpleTree(a, b, c, value_factory_,
program_builder));
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
EXPECT_TRUE(context.IsSubplanInspectable(a));
ASSERT_OK(context.ReplaceSubplan(c, {}));
EXPECT_THAT(context.GetSubplan(a), ElementsAre(UniquePtrHolds(plan_steps.b),
UniquePtrHolds(plan_steps.a)));
EXPECT_THAT(context.GetSubplan(c), IsEmpty());
}
TEST_F(PlannerContextTest, ReplacePlanUpdatesSibling) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(auto plan_steps, InitSimpleTree(a, b, c, value_factory_,
program_builder));
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
ExecutionPath new_b;
ASSERT_OK_AND_ASSIGN(auto b1_step,
CreateConstValueStep(value_factory_.GetNullValue(), -1));
const ExpressionStep* b1_step_ptr = b1_step.get();
new_b.push_back(std::move(b1_step));
ASSERT_OK_AND_ASSIGN(auto b2_step,
CreateConstValueStep(value_factory_.GetNullValue(), -1));
const ExpressionStep* b2_step_ptr = b2_step.get();
new_b.push_back(std::move(b2_step));
ASSERT_OK(context.ReplaceSubplan(b, std::move(new_b)));
EXPECT_THAT(context.GetSubplan(c), ElementsAre(UniquePtrHolds(plan_steps.c)));
EXPECT_THAT(context.GetSubplan(b), ElementsAre(UniquePtrHolds(b1_step_ptr),
UniquePtrHolds(b2_step_ptr)));
EXPECT_THAT(
context.GetSubplan(a),
ElementsAre(UniquePtrHolds(b1_step_ptr), UniquePtrHolds(b2_step_ptr),
UniquePtrHolds(plan_steps.c), UniquePtrHolds(plan_steps.a)));
}
TEST_F(PlannerContextTest, ReplacePlanFailsOnUpdatedNode) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(auto plan_steps, InitSimpleTree(a, b, c, value_factory_,
program_builder));
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
EXPECT_THAT(context.GetSubplan(a), ElementsAre(UniquePtrHolds(plan_steps.b),
UniquePtrHolds(plan_steps.c),
UniquePtrHolds(plan_steps.a)));
ASSERT_OK(context.ReplaceSubplan(a, {}));
EXPECT_THAT(context.ReplaceSubplan(b, {}),
StatusIs(absl::StatusCode::kInternal));
}
TEST_F(PlannerContextTest, AddSubplanStep) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(auto plan_steps, InitSimpleTree(a, b, c, value_factory_,
program_builder));
ASSERT_OK_AND_ASSIGN(auto b2_step,
CreateConstValueStep(value_factory_.GetNullValue(), -1));
const ExpressionStep* b2_step_ptr = b2_step.get();
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
ASSERT_OK(context.AddSubplanStep(b, std::move(b2_step)));
EXPECT_THAT(context.GetSubplan(b), ElementsAre(UniquePtrHolds(plan_steps.b),
UniquePtrHolds(b2_step_ptr)));
EXPECT_THAT(context.GetSubplan(c), ElementsAre(UniquePtrHolds(plan_steps.c)));
EXPECT_THAT(
context.GetSubplan(a),
ElementsAre(UniquePtrHolds(plan_steps.b), UniquePtrHolds(b2_step_ptr),
UniquePtrHolds(plan_steps.c), UniquePtrHolds(plan_steps.a)));
}
TEST_F(PlannerContextTest, AddSubplanStepFailsOnUnknownNode) {
Expr a;
Expr b;
Expr c;
Expr d;
ProgramBuilder program_builder;
ASSERT_OK(InitSimpleTree(a, b, c, value_factory_, program_builder).status());
ASSERT_OK_AND_ASSIGN(auto b2_step,
CreateConstValueStep(value_factory_.GetNullValue(), -1));
PlannerContext context(resolver_, options_, value_factory_, issue_collector_,
program_builder);
EXPECT_THAT(context.GetSubplan(d), IsEmpty());
EXPECT_THAT(context.AddSubplanStep(d, std::move(b2_step)),
StatusIs(absl::StatusCode::kInternal));
}
class ProgramBuilderTest : public testing::Test {
public:
ProgramBuilderTest()
: type_registry_(),
function_registry_(),
value_factory_(cel::MemoryManagerRef::ReferenceCounting(),
type_registry_.GetComposedTypeProvider()) {}
protected:
cel::TypeRegistry type_registry_;
cel::FunctionRegistry function_registry_;
cel::common_internal::LegacyValueManager value_factory_;
};
TEST_F(ProgramBuilderTest, ExtractSubexpression) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(
SimpleTreeSteps step_ptrs,
InitSimpleTree(a, b, c, value_factory_, program_builder));
EXPECT_EQ(program_builder.ExtractSubexpression(&c), 0);
EXPECT_EQ(program_builder.ExtractSubexpression(&b), 1);
EXPECT_THAT(program_builder.FlattenMain(),
ElementsAre(UniquePtrHolds(step_ptrs.a)));
EXPECT_THAT(program_builder.FlattenSubexpressions(),
ElementsAre(ElementsAre(UniquePtrHolds(step_ptrs.c)),
ElementsAre(UniquePtrHolds(step_ptrs.b))));
}
TEST_F(ProgramBuilderTest, FlattenRemovesChildrenReferences) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
program_builder.EnterSubexpression(&a);
program_builder.EnterSubexpression(&b);
program_builder.EnterSubexpression(&c);
program_builder.ExitSubexpression(&c);
program_builder.ExitSubexpression(&b);
program_builder.ExitSubexpression(&a);
auto subexpr_b = program_builder.GetSubexpression(&b);
ASSERT_TRUE(subexpr_b != nullptr);
subexpr_b->Flatten();
EXPECT_EQ(program_builder.GetSubexpression(&c), nullptr);
}
TEST_F(ProgramBuilderTest, ExtractReturnsNullOnFlattendExpr) {
Expr a;
Expr b;
ProgramBuilder program_builder;
program_builder.EnterSubexpression(&a);
program_builder.EnterSubexpression(&b);
program_builder.ExitSubexpression(&b);
program_builder.ExitSubexpression(&a);
auto* subexpr_a = program_builder.GetSubexpression(&a);
auto* subexpr_b = program_builder.GetSubexpression(&b);
ASSERT_TRUE(subexpr_a != nullptr);
ASSERT_TRUE(subexpr_b != nullptr);
subexpr_a->Flatten();
EXPECT_EQ(subexpr_a->ExtractChild(subexpr_b), nullptr);
EXPECT_EQ(program_builder.ExtractSubexpression(&b), -1);
}
TEST_F(ProgramBuilderTest, ExtractReturnsNullOnNonChildren) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
program_builder.EnterSubexpression(&a);
program_builder.EnterSubexpression(&b);
program_builder.EnterSubexpression(&c);
program_builder.ExitSubexpression(&c);
program_builder.ExitSubexpression(&b);
program_builder.ExitSubexpression(&a);
auto* subexpr_a = program_builder.GetSubexpression(&a);
auto* subexpr_c = program_builder.GetSubexpression(&c);
ASSERT_TRUE(subexpr_a != nullptr);
ASSERT_TRUE(subexpr_c != nullptr);
EXPECT_EQ(subexpr_a->ExtractChild(subexpr_c), nullptr);
}
TEST_F(ProgramBuilderTest, ExtractWorks) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
program_builder.EnterSubexpression(&a);
program_builder.EnterSubexpression(&b);
program_builder.ExitSubexpression(&b);
ASSERT_OK_AND_ASSIGN(auto a_step,
CreateConstValueStep(value_factory_.GetNullValue(), -1));
program_builder.AddStep(std::move(a_step));
program_builder.EnterSubexpression(&c);
program_builder.ExitSubexpression(&c);
program_builder.ExitSubexpression(&a);
auto* subexpr_a = program_builder.GetSubexpression(&a);
auto* subexpr_c = program_builder.GetSubexpression(&c);
ASSERT_TRUE(subexpr_a != nullptr);
ASSERT_TRUE(subexpr_c != nullptr);
EXPECT_THAT(subexpr_a->ExtractChild(subexpr_c), UniquePtrHolds(subexpr_c));
}
TEST_F(ProgramBuilderTest, ExtractToRequiresFlatten) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
ASSERT_OK_AND_ASSIGN(
SimpleTreeSteps step_ptrs,
InitSimpleTree(a, b, c, value_factory_, program_builder));
auto* subexpr_a = program_builder.GetSubexpression(&a);
ExecutionPath path;
EXPECT_FALSE(subexpr_a->ExtractTo(path));
subexpr_a->Flatten();
EXPECT_TRUE(subexpr_a->ExtractTo(path));
EXPECT_THAT(path, ElementsAre(UniquePtrHolds(step_ptrs.b),
UniquePtrHolds(step_ptrs.c),
UniquePtrHolds(step_ptrs.a)));
}
TEST_F(ProgramBuilderTest, Recursive) {
Expr a;
Expr b;
Expr c;
ProgramBuilder program_builder;
program_builder.EnterSubexpression(&a);
program_builder.EnterSubexpression(&b);
program_builder.current()->set_recursive_program(
CreateConstValueDirectStep(value_factory_.GetNullValue()), 1);
program_builder.ExitSubexpression(&b);
program_builder.EnterSubexpression(&c);
program_builder.current()->set_recursive_program(
CreateConstValueDirectStep(value_factory_.GetNullValue()), 1);
program_builder.ExitSubexpression(&c);
ASSERT_FALSE(program_builder.current()->IsFlattened());
ASSERT_FALSE(program_builder.current()->IsRecursive());
ASSERT_TRUE(program_builder.GetSubexpression(&b)->IsRecursive());
ASSERT_TRUE(program_builder.GetSubexpression(&c)->IsRecursive());
EXPECT_EQ(program_builder.GetSubexpression(&b)->recursive_program().depth, 1);
EXPECT_EQ(program_builder.GetSubexpression(&c)->recursive_program().depth, 1);
cel::ast_internal::Call call_expr;
call_expr.set_function("_==_");
call_expr.mutable_args().emplace_back();
call_expr.mutable_args().emplace_back();
auto max_depth = program_builder.current()->RecursiveDependencyDepth();
EXPECT_THAT(max_depth, Optional(1));
auto deps = program_builder.current()->ExtractRecursiveDependencies();
program_builder.current()->set_recursive_program(
CreateDirectFunctionStep(-1, call_expr, std::move(deps), {}),
*max_depth + 1);
program_builder.ExitSubexpression(&a);
auto path = program_builder.FlattenMain();
ASSERT_THAT(path, testing::SizeIs(1));
EXPECT_TRUE(path[0]->GetNativeTypeId() ==
cel::NativeTypeId::For<WrappedDirectStep>());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/flat_expr_builder_extensions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/flat_expr_builder_extensions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
73a594f2-3fb4-4581-a0fc-baaa11a9b95c | cpp | tensorflow/tensorflow | hlo_casting_utils | third_party/xla/xla/hlo/ir/hlo_casting_utils.h | third_party/xla/xla/service/hlo_casting_utils_test.cc | #ifndef XLA_HLO_IR_HLO_CASTING_UTILS_H_
#define XLA_HLO_IR_HLO_CASTING_UTILS_H_
#include <type_traits>
#include "xla/hlo/ir/hlo_instruction.h"
#include "tsl/platform/logging.h"
namespace xla {
template <class T>
using EnableIfDerivedFromHlo =
typename std::enable_if<std::is_base_of<HloInstruction, T>::value>::type;
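// LLVM-style casting helper for HloInstruction subclasses: Cast CHECK-fails
// on a null or mismatched input; in debug builds the static_cast result is
// additionally cross-checked against dynamic_cast.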
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* Cast(const HloInstruction* instruction) {
CHECK(instruction != nullptr);
CHECK(T::ClassOf(instruction))
<< "Invalid HloInstruction casting. Destination type: "
<< typeid(T).name() << ". Instruction: " << instruction->name();
const T* casted = static_cast<const T*>(instruction);
#ifndef NDEBUG
const T* dynamic_casted = dynamic_cast<const T*>(instruction);
CHECK(dynamic_casted != nullptr)
<< "Invalid HloInstruction casting. Destination type: "
<< typeid(T).name() << ". Instruction: " << instruction->name();
#endif
return casted;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* Cast(HloInstruction* instruction) {
return const_cast<T*>(
Cast<T>(const_cast<const HloInstruction*>(instruction)));
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* CastOrNull(const HloInstruction* instruction) {
return instruction != nullptr ? Cast<T>(instruction) : nullptr;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* CastOrNull(HloInstruction* instruction) {
return const_cast<T*>(
CastOrNull<T>(const_cast<const HloInstruction*>(instruction)));
}
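// DynCast returns nullptr instead of CHECK-failing when `instruction` is not
// a T; it still CHECK-fails on a null input (use DynCastOrNull to allow null).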
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* DynCast(const HloInstruction* instruction) {
CHECK(instruction != nullptr);
const T* casted =
T::ClassOf(instruction) ? static_cast<const T*>(instruction) : nullptr;
#ifndef NDEBUG
CHECK_EQ(casted, dynamic_cast<const T*>(instruction));
#endif
return casted;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* DynCast(HloInstruction* instruction) {
return const_cast<T*>(
DynCast<T>(const_cast<const HloInstruction*>(instruction)));
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* DynCastOrNull(const HloInstruction* instruction) {
return instruction != nullptr ? DynCast<T>(instruction) : nullptr;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* DynCastOrNull(HloInstruction* instruction) {
return const_cast<T*>(
DynCastOrNull<T>(const_cast<const HloInstruction*>(instruction)));
}
}
#endif | #include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class DummyInstruction : public HloInstruction {
public:
DummyInstruction()
: HloInstruction(HloOpcode::kConstant, ShapeUtil::MakeShape(F32, {})) {}
static bool ClassOf(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kConstant;
}
};
class AnotherDummyInstruction : public HloInstruction {
public:
AnotherDummyInstruction()
: HloInstruction(HloOpcode::kParameter, ShapeUtil::MakeShape(F32, {})) {}
static bool ClassOf(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kParameter;
}
};
TEST(HloCastingUtilsTest, CastSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted =
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, CastDiesForWrongType) {
AnotherDummyInstruction instruction;
ASSERT_DEATH(
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction)), "");
}
TEST(HloCastingUtilsTest, CastDiesForNullptr) {
HloInstruction* null = nullptr;
ASSERT_DEATH(Cast<DummyInstruction>(null), "");
}
TEST(HloCastingUtilsTest, CastOrNullSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted =
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, CastOrNullDiesForWrongType) {
AnotherDummyInstruction instruction;
ASSERT_DEATH(
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction)), "");
}
TEST(HloCastingUtilsTest, CastOrNullReturnsNullptrForNullptr) {
HloInstruction* null = nullptr;
DummyInstruction* casted = CastOrNull<DummyInstruction>(null);
ASSERT_EQ(casted, nullptr);
}
TEST(HloCastingUtilsTest, DynCastSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted =
DynCast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, DynCastReturnsNullptrForWrongType) {
AnotherDummyInstruction instruction;
DummyInstruction* casted =
DynCast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, nullptr);
}
TEST(HloCastingUtilsTest, DynCastDiesForNullptr) {
HloInstruction* null = nullptr;
ASSERT_DEATH(DynCast<DummyInstruction>(null), "");
}
TEST(HloCastingUtilsTest, DynCastOrNullSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted = DynCastOrNull<DummyInstruction>(
static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, DynCastOrNullReturnsNullptrForWrongType) {
AnotherDummyInstruction instruction;
DummyInstruction* casted = DynCastOrNull<DummyInstruction>(
static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, nullptr);
}
TEST(HloCastingUtilsTest, DynCastOrNullReturnsNullptrForNullptr) {
HloInstruction* null = nullptr;
DummyInstruction* casted = DynCastOrNull<DummyInstruction>(null);
ASSERT_EQ(casted, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_casting_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_casting_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c59397db-329b-4522-9467-0a871a153527 | cpp | tensorflow/tensorflow | all_gather_dynamic_slice_simplifier | third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.cc | third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier_test.cc | #include "xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/service/collective_opt_utils.h"
namespace xla {
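// Matches a dynamic-slice that exactly undoes a preceding all-gather
// (possibly through an intervening reshape), i.e. each partition slices its
// own contribution back out, so the pair can be replaced by the all-gather
// input.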
bool AllGatherDynamicSliceSimplifier::InstructionMatchesPattern(
HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kDynamicSlice) {
return false;
}
HloDynamicSliceInstruction* dynamic_slice =
Cast<HloDynamicSliceInstruction>(instruction);
HloInstruction* operand = dynamic_slice->mutable_operand(0);
bool is_reshape = operand->opcode() == HloOpcode::kReshape;
bool is_all_gather = operand->opcode() == HloOpcode::kAllGather;
if (!is_reshape && !is_all_gather) {
return false;
}
if (is_reshape && operand->operand(0)->opcode() != HloOpcode::kAllGather) {
return false;
}
const HloModuleConfig& config = instruction->GetModule()->config();
HloAllGatherInstruction* all_gather =
is_reshape ? Cast<HloAllGatherInstruction>(operand->mutable_operand(0))
: Cast<HloAllGatherInstruction>(operand);
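  // The literal arguments below are the matcher's tuning knobs (split-dim and
  // intervening-reshape handling, minimum rank, partition/replica offset
  // predicates, bitcast and multi-user handling); the parameter names are
  // declared in collective_opt_utils.h.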
bool match = AllGatherDynamicSliceCancellation(
all_gather, config.num_partitions(), config.replica_count(),
true,
true, 1,
HloPredicateIsOp<HloOpcode::kPartitionId>,
HloPredicateIsOp<HloOpcode::kReplicaId>,
false,
true);
return match;
}
absl::StatusOr<HloInstruction*>
AllGatherDynamicSliceSimplifier::ExpandInstruction(
HloInstruction* instruction) {
HloDynamicSliceInstruction* dynamic_slice =
Cast<HloDynamicSliceInstruction>(instruction);
HloInstruction* operand = dynamic_slice->mutable_operand(0);
if (operand->opcode() != HloOpcode::kReshape) {
return operand->mutable_operand(0);
}
HloReshapeInstruction* reshape = Cast<HloReshapeInstruction>(operand);
HloAllGatherInstruction* all_gather =
Cast<HloAllGatherInstruction>(reshape->mutable_operand(0));
HloInstruction* all_gather_input = all_gather->mutable_operand(0);
auto* new_reshape = instruction->parent()->AddInstruction(
HloInstruction::CreateReshape(dynamic_slice->shape(), all_gather_input));
return new_reshape;
}
} | #include "xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::Matcher;
namespace op = xla::testing::opcode_matchers;
class AllGatherDynamicSliceSimplifierTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions, bool expect_change) {
HloModuleConfig config = GetModuleConfigForTest(
num_replicas,
num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
auto changed = AllGatherDynamicSliceSimplifier().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return std::move(module);
}
};
TEST_F(AllGatherDynamicSliceSimplifierTest, AllPartitions) {
absl::string_view hlo_string = R"(
HloModule AllGather
ENTRY %AllGather {
%param = f32[32,8,128]{2,1,0} parameter(0)
%ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}},
dimensions={0}, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_s32 = s32[] convert(%pid)
%slice_size = s32[] constant(32)
%offset = s32[] multiply(%pid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %ds = f32[32,8,128]{2,1,0} dynamic-slice(%ag, %offset, %zero, %zero),
dynamic_slice_sizes={32,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/1,
                                               /*num_partitions=*/8,
                                               /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Parameter(0));
}
TEST_F(AllGatherDynamicSliceSimplifierTest, AllReplicasWithReshape) {
absl::string_view hlo_string = R"(
HloModule AllGather
ENTRY %AllGather {
%param = f32[32,8,128]{2,1,0} parameter(0)
%ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}},
dimensions={0}, channel_id=1, use_global_device_ids=true
%reshape = f32[256,8,64,2]{3,2,1,0} reshape(%ag)
%pid = u32[] partition-id()
%pid_s32 = s32[] convert(%pid)
%slice_size = s32[] constant(32)
%offset = s32[] multiply(%pid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %ds = f32[32,8,64,2]{3,2,1,0} dynamic-slice(%reshape, %offset, %zero, %zero, %zero),
dynamic_slice_sizes={32,8,64,2}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/1,
                                               /*num_partitions=*/8,
                                               /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(op::Parameter(0)));
}
TEST_F(AllGatherDynamicSliceSimplifierTest,
AllPartitionsWithReshapeOnSliceDim) {
absl::string_view hlo_string = R"(
HloModule AllGather
ENTRY %AllGather {
%param = f32[32,8,128]{2,1,0} parameter(0)
%ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}},
dimensions={0}, channel_id=1, use_global_device_ids=true
%reshape = f32[2048,128]{1,0} reshape(%ag)
%pid = u32[] partition-id()
%pid_s32 = s32[] convert(%pid)
%slice_size = s32[] constant(256)
%offset = s32[] multiply(%pid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %ds = f32[256,128]{1,0} dynamic-slice(%reshape, %offset, %zero),
dynamic_slice_sizes={256,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/1,
                                               /*num_partitions=*/8,
                                               /*expect_change=*/false));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(
op::Reshape(op::AllGather(op::Parameter(0))),
op::Multiply(op::Convert(op::PartitionId()), op::Constant()),
op::Constant()));
}
TEST_F(AllGatherDynamicSliceSimplifierTest, NoAllGather) {
absl::string_view hlo_string = R"(
HloModule NoAllGather
ENTRY %NoAllGather {
%param = f32[32,8,128]{2,1,0} parameter(0)
%pid = u32[] partition-id()
%pid_s32 = s32[] convert(%pid)
%slice_size = s32[] constant(32)
%offset = s32[] multiply(%pid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %ds = f32[32,8,128]{2,1,0} dynamic-slice(%param, %offset, %zero, %zero),
dynamic_slice_sizes={32,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/1,
                                               /*num_partitions=*/1,
                                               /*expect_change=*/false));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(
op::Parameter(0),
op::Multiply(op::Convert(op::PartitionId()), op::Constant()),
op::Constant(), op::Constant()));
}
TEST_F(AllGatherDynamicSliceSimplifierTest, IncorrectAllGatherDimension) {
absl::string_view hlo_string = R"(
HloModule IncorrectAllGatherDimension
ENTRY %IncorrectAllGatherDimension {
%param = f32[32,8,128]{2,1,0} parameter(0)
%ag = f32[32,64,128]{2,1,0} all-gather(%param), replica_groups={},
dimensions={1}, channel_id=1
%pid = u32[] partition-id()
%pid_s32 = s32[] convert(%pid)
%slice_size = s32[] constant(8)
%offset = s32[] multiply(%pid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %ds = f32[32,8,128]{2,1,0} dynamic-slice(%ag, %zero, %offset, %zero),
dynamic_slice_sizes={32,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/8,
                                               /*num_partitions=*/1,
                                               /*expect_change=*/false));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(
op::AllGather(op::Parameter(0)), op::Constant(),
op::Multiply(op::Convert(op::PartitionId()), op::Constant()),
op::Constant()));
}
TEST_F(AllGatherDynamicSliceSimplifierTest,
AllReplicasWithReshapeMultipleUsers) {
absl::string_view hlo_string = R"(
HloModule AllGather
ENTRY %AllGather {
%param = f32[32,8,128]{2,1,0} parameter(0)
%ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}},
dimensions={0}, channel_id=1, use_global_device_ids=true
%reshape = f32[256,8,64,2]{3,2,1,0} reshape(%ag)
%pid = u32[] partition-id()
%pid_s32 = s32[] convert(%pid)
%slice_size = s32[] constant(32)
%offset = s32[] multiply(%pid_s32, %slice_size)
%zero = s32[] constant(0)
%ds = f32[32,8,64,2]{3,2,1,0} dynamic-slice(%reshape, %offset, %zero, %zero, %zero),
dynamic_slice_sizes={32,8,64,2}
ROOT %tuple = (f32[32,8,64,2]{3,2,1,0}, f32[256,8,128]{2,1,0}) tuple(%ds, %ag)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
                                               /*num_replicas=*/1,
                                               /*num_partitions=*/8,
                                               /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Reshape(op::Parameter(0)),
op::AllGather(op::Parameter(0))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1a3554d7-3320-47c5-846e-3dba1ae4068b | cpp | google/tensorstore | masked_array | tensorstore/internal/masked_array.cc | tensorstore/internal/masked_array_test.cc | #include "tensorstore/internal/masked_array.h"
#include <algorithm>
#include <cassert>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
struct SetMask {
void operator()(bool* x, void*) const { *x = true; }
};
struct SetMaskAndCountChanged {
Index num_changed = 0;
void operator()(bool* x) {
if (!*x) {
++num_changed;
*x = true;
}
}
};
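// Returns whether the union of boxes `a` and `b` is itself a box (their hull
// adds no elements beyond the union), determined by comparing element counts.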
bool IsHullEqualToUnion(BoxView<> a, BoxView<> b) {
assert(a.rank() == b.rank());
Index hull_num_elements = 1, a_num_elements = 1, b_num_elements = 1,
intersection_num_elements = 1;
for (DimensionIndex i = 0; i < a.rank(); ++i) {
IndexInterval a_interval = a[i], b_interval = b[i];
IndexInterval hull = Hull(a_interval, b_interval);
IndexInterval intersection = Intersect(a_interval, b_interval);
hull_num_elements *= hull.size();
a_num_elements *= a_interval.size();
b_num_elements *= b_interval.size();
intersection_num_elements *= intersection.size();
}
return (hull_num_elements ==
a_num_elements + b_num_elements - intersection_num_elements);
}
void Hull(BoxView<> a, BoxView<> b, MutableBoxView<> out) {
const DimensionIndex rank = out.rank();
assert(a.rank() == rank && b.rank() == rank);
for (DimensionIndex i = 0; i < rank; ++i) {
out[i] = Hull(a[i], b[i]);
}
}
void Intersect(BoxView<> a, BoxView<> b, MutableBoxView<> out) {
const DimensionIndex rank = out.rank();
assert(a.rank() == rank && b.rank() == rank);
for (DimensionIndex i = 0; i < rank; ++i) {
out[i] = Intersect(a[i], b[i]);
}
}
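// Returns the byte offset of `position` relative to `base` under `strides`.
// Wrap-on-overflow arithmetic keeps any intermediate overflow well-defined.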
Index GetRelativeOffset(tensorstore::span<const Index> base,
tensorstore::span<const Index> position,
tensorstore::span<const Index> strides) {
const DimensionIndex rank = base.size();
assert(rank == position.size());
assert(rank == strides.size());
Index result = 0;
for (DimensionIndex i = 0; i < rank; ++i) {
result = internal::wrap_on_overflow::Add(
result, internal::wrap_on_overflow::Multiply(
strides[i], internal::wrap_on_overflow::Subtract(
position[i], base[i])));
}
return result;
}
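// Once every element of `region` is masked, the region box alone describes
// the mask exactly, so the dense bool array can be dropped.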
void RemoveMaskArrayIfNotNeeded(MaskData* mask) {
if (mask->num_masked_elements == mask->region.num_elements()) {
mask->mask_array.reset();
}
}
}
MaskData::MaskData(DimensionIndex rank) : region(rank) {
region.Fill(IndexInterval::UncheckedSized(0, 0));
}
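// Allocates a zero-initialized bool array covering `box` and marks the
// elements of `mask_region` as masked.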
std::unique_ptr<bool[], FreeDeleter> CreateMaskArray(
BoxView<> box, BoxView<> mask_region,
tensorstore::span<const Index> byte_strides) {
std::unique_ptr<bool[], FreeDeleter> result(
static_cast<bool*>(std::calloc(box.num_elements(), sizeof(bool))));
ByteStridedPointer<bool> start = result.get();
start += GetRelativeOffset(box.origin(), mask_region.origin(), byte_strides);
internal::IterateOverArrays(
internal::SimpleElementwiseFunction<SetMask(bool), void*>{},
      /*arg=*/nullptr,
skip_repeated_elements,
ArrayView<bool>(start.get(),
StridedLayoutView<>(mask_region.shape(), byte_strides)));
return result;
}
void CreateMaskArrayFromRegion(BoxView<> box, MaskData* mask,
tensorstore::span<const Index> byte_strides) {
assert(mask->num_masked_elements == mask->region.num_elements());
mask->mask_array = CreateMaskArray(box, mask->region, byte_strides);
}
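// Merges `mask_b` into `mask_a`. If both masks have dense arrays they are
// OR'ed elementwise; if neither does and the hull of the two regions equals
// their union, the box representation is kept; otherwise `mask_a` is
// materialized as a dense array and `mask_b`'s region is painted into it.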
void UnionMasks(BoxView<> box, MaskData* mask_a, MaskData* mask_b) {
assert(mask_a != mask_b);
if (mask_a->num_masked_elements == 0) {
std::swap(*mask_a, *mask_b);
return;
} else if (mask_b->num_masked_elements == 0) {
return;
}
const DimensionIndex rank = box.rank();
assert(mask_a->region.rank() == rank);
assert(mask_b->region.rank() == rank);
if (mask_a->mask_array && mask_b->mask_array) {
const Index size = box.num_elements();
mask_a->num_masked_elements = 0;
for (Index i = 0; i < size; ++i) {
if ((mask_a->mask_array[i] |= mask_b->mask_array[i])) {
++mask_a->num_masked_elements;
}
}
Hull(mask_a->region, mask_b->region, mask_a->region);
RemoveMaskArrayIfNotNeeded(mask_a);
return;
}
if (!mask_a->mask_array && !mask_b->mask_array) {
if (IsHullEqualToUnion(mask_a->region, mask_b->region)) {
Hull(mask_a->region, mask_b->region, mask_a->region);
mask_a->num_masked_elements = mask_a->region.num_elements();
return;
}
} else if (!mask_a->mask_array) {
std::swap(*mask_a, *mask_b);
}
Index byte_strides[kMaxRank];
const tensorstore::span<Index> byte_strides_span(&byte_strides[0], rank);
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), box.shape(),
byte_strides_span);
if (!mask_a->mask_array) {
CreateMaskArrayFromRegion(box, mask_a, byte_strides_span);
}
ByteStridedPointer<bool> start = mask_a->mask_array.get();
start += GetRelativeOffset(box.origin(), mask_b->region.origin(),
byte_strides_span);
IterateOverArrays(
[&](bool* ptr) {
if (!*ptr) ++mask_a->num_masked_elements;
*ptr = true;
},
{},
ArrayView<bool>(start.get(), StridedLayoutView<>(mask_b->region.shape(),
byte_strides_span)));
Hull(mask_a->region, mask_b->region, mask_a->region);
RemoveMaskArrayIfNotNeeded(mask_a);
}
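// Copies elements of `source` into `dest` at positions that are not masked:
// a full mask is a no-op, an empty mask is a plain copy, and otherwise
// copy_assign_unmasked is driven by a (possibly temporary) dense mask array.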
void RebaseMaskedArray(BoxView<> box, ArrayView<const void> source,
ArrayView<void> dest, const MaskData& mask) {
assert(source.dtype() == dest.dtype());
assert(internal::RangesEqual(box.shape(), source.shape()));
assert(internal::RangesEqual(box.shape(), dest.shape()));
const Index num_elements = box.num_elements();
if (mask.num_masked_elements == num_elements) return;
DataType dtype = source.dtype();
if (mask.num_masked_elements == 0) {
[[maybe_unused]] const auto success = internal::IterateOverArrays(
        {&dtype->copy_assign, /*context=*/nullptr},
        /*arg=*/nullptr, skip_repeated_elements, source, dest);
assert(success);
return;
}
Index mask_byte_strides_storage[kMaxRank];
const tensorstore::span<Index> mask_byte_strides(
&mask_byte_strides_storage[0], box.rank());
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), box.shape(),
mask_byte_strides);
std::unique_ptr<bool[], FreeDeleter> mask_owner;
bool* mask_array_ptr;
if (!mask.mask_array) {
mask_owner = CreateMaskArray(box, mask.region, mask_byte_strides);
mask_array_ptr = mask_owner.get();
} else {
mask_array_ptr = mask.mask_array.get();
}
ArrayView<const bool> mask_array(
mask_array_ptr, StridedLayoutView<>(box.shape(), mask_byte_strides));
[[maybe_unused]] const auto success = internal::IterateOverArrays(
      {&dtype->copy_assign_unmasked, /*context=*/nullptr},
      /*arg=*/nullptr, skip_repeated_elements, source, dest, mask_array);
assert(success);
}
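// Marks the output range of `input_to_output`, clipped to `output_box`, as
// masked. A dense mask array is created only when the combined region cannot
// be represented exactly by the hull of the existing and new boxes.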
void WriteToMask(MaskData* mask, BoxView<> output_box,
IndexTransformView<> input_to_output, Arena* arena) {
assert(input_to_output.output_rank() == output_box.rank());
if (input_to_output.domain().box().is_empty()) {
return;
}
const DimensionIndex output_rank = output_box.rank();
Box<dynamic_rank(kNumInlinedDims)> output_range(output_rank);
const bool range_is_exact =
GetOutputRange(input_to_output, output_range).value();
Intersect(output_range, output_box, output_range);
Index mask_byte_strides_storage[kMaxRank];
const tensorstore::span<Index> mask_byte_strides(
&mask_byte_strides_storage[0], output_rank);
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), output_box.shape(),
mask_byte_strides);
StridedLayoutView<dynamic_rank, offset_origin> mask_layout(output_box,
mask_byte_strides);
const bool use_mask_array =
output_box.rank() != 0 &&
mask->num_masked_elements != output_box.num_elements() &&
(static_cast<bool>(mask->mask_array) ||
(!Contains(mask->region, output_range) &&
(!range_is_exact || !IsHullEqualToUnion(mask->region, output_range))));
if (use_mask_array && !mask->mask_array) {
CreateMaskArrayFromRegion(output_box, mask, mask_byte_strides);
}
Hull(mask->region, output_range, mask->region);
if (use_mask_array) {
auto mask_iterable =
GetTransformedArrayNDIterable(
ArrayView<Shared<bool>, dynamic_rank, offset_origin>(
AddByteOffset(
SharedElementPointer<bool>(
UnownedToShared(mask->mask_array.get())),
-IndexInnerProduct(output_box.origin(),
tensorstore::span(mask_byte_strides))),
mask_layout),
input_to_output, arena)
.value();
SetMaskAndCountChanged set_mask_context;
constexpr ElementwiseFunction<1> set_mask_func =
internal::SimpleElementwiseFunction<SetMaskAndCountChanged(bool)>();
auto status = internal::IterateOverNDIterables<1, true>(
input_to_output.input_shape(), skip_repeated_elements,
{{mask_iterable.get()}}, arena, {&set_mask_func, &set_mask_context});
mask->num_masked_elements += set_mask_context.num_changed;
status.IgnoreError();
assert(status.ok());
} else {
mask->num_masked_elements = mask->region.num_elements();
}
}
}
} | #include "tensorstore/internal/masked_array.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/masked_array_testutil.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::ArrayView;
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeArrayView;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::offset_origin;
using ::tensorstore::SharedArray;
using ::tensorstore::StridedLayout;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::ElementCopyFunction;
using ::tensorstore::internal::MaskData;
using ::tensorstore::internal::SimpleElementwiseFunction;
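// Test helper that owns a MaskData for a given domain box and exposes its
// internals (dense mask array, masked region, element count) to assertions.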
class MaskedArrayTester {
public:
explicit MaskedArrayTester(BoxView<> box)
: box_(box),
mask_(box.rank()),
mask_layout_zero_origin_(tensorstore::ContiguousLayoutOrder::c,
sizeof(bool), box.shape()) {}
ArrayView<const bool> mask_array() const {
if (!mask_.mask_array) return {};
return ArrayView<const bool>(mask_.mask_array.get(),
mask_layout_zero_origin_);
}
Index num_masked_elements() const { return mask_.num_masked_elements; }
BoxView<> mask_region() const { return mask_.region; }
const MaskData& mask() const { return mask_; }
BoxView<> domain() const { return box_; }
void Combine(MaskedArrayTester&& other) {
UnionMasks(box_, &mask_, &other.mask_);
}
void Reset() { mask_.Reset(); }
protected:
Box<> box_;
MaskData mask_;
StridedLayout<> mask_layout_zero_origin_;
};
template <typename T>
class MaskedArrayWriteTester : public MaskedArrayTester {
public:
explicit MaskedArrayWriteTester(BoxView<> box)
: MaskedArrayTester(box),
dest_(tensorstore::AllocateArray<T>(box, tensorstore::c_order,
tensorstore::value_init)),
dest_layout_zero_origin_(tensorstore::ContiguousLayoutOrder::c,
sizeof(T), box.shape()) {}
template <typename CopyFunc>
absl::Status Write(IndexTransformView<> dest_transform,
TransformedArray<const T> source, CopyFunc&& copy_func) {
ElementCopyFunction copy_function =
SimpleElementwiseFunction<std::remove_reference_t<CopyFunc>(const T, T),
void*>();
return WriteToMaskedArray(dest_.byte_strided_origin_pointer().get(), &mask_,
dest_.domain(), dest_transform, source,
                              {&copy_function, &copy_func});
}
absl::Status Write(IndexTransformView<> dest_transform,
TransformedArray<const T> source) {
return Write(dest_transform, source,
[](const T* source, T* dest, void*) { *dest = *source; });
}
void Rebase(ArrayView<const T> source) {
RebaseMaskedArray(
box_, source,
tensorstore::ArrayOriginCast<tensorstore::zero_origin>(dest_).value(),
mask_);
}
IndexTransform<> transform() const {
return tensorstore::IdentityTransform(dest_.domain());
}
ArrayView<const T> dest_array() const {
return ArrayView<const T>(dest_.byte_strided_origin_pointer().get(),
dest_layout_zero_origin_);
}
private:
SharedArray<T, dynamic_rank, offset_origin> dest_;
StridedLayout<> dest_layout_zero_origin_;
};
TEST(MaskDataTest, Construct) {
MaskData mask(3);
EXPECT_FALSE(mask.mask_array);
EXPECT_EQ(0, mask.num_masked_elements);
EXPECT_EQ(0, mask.region.num_elements());
}
TEST(WriteToMaskedArrayTest, RankZero) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
TENSORSTORE_EXPECT_OK(tester.Write(tester.transform(), MakeScalarArray(5)));
EXPECT_EQ(1, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(5), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankZeroError) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
EXPECT_THAT(
tester.Write(
tester.transform(), MakeScalarArray(5),
[](const int* source, int* dest, void* status) { return false; }),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(0), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOneNoElementsWritten) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).AddNew().SizedInterval(0, 0)).value(),
MakeArrayView(tensorstore::span<const int>{})));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(0), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOne) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {10})};
TENSORSTORE_EXPECT_OK(
tester.Write((tester.transform() | Dims(0).SizedInterval(2, 3)).value(),
MakeOffsetArray({2}, {1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({0, 1, 2, 3, 0, 0, 0, 0, 0, 0}), tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(5, 2)).value(),
MakeArray({4, 5})));
EXPECT_EQ(5, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {5}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArrayView({0, 1, 2, 3, 4, 5, 0, 0, 0, 0}), tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(9, 2)).value(),
MakeArray({6, 7})));
EXPECT_EQ(7, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {9}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({0, 1, 1, 1, 1, 1, 0, 0, 1, 1}),
tester.mask_array());
EXPECT_EQ(MakeArray({0, 1, 2, 3, 4, 5, 0, 0, 6, 7}), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOneStrided) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {8})};
auto input_to_output = IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({3})
.output_single_input_dimension(0, -2, 2, 0)
.Finalize()
.value();
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).SizedInterval(2, 3, 2).TranslateTo(0))
.value(),
MakeArray({1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(MakeArray<bool>({0, 1, 0, 1, 0, 1, 0, 0}), tester.mask_array());
EXPECT_EQ(MakeArray({0, 1, 0, 2, 0, 3, 0, 0}), tester.dest_array());
EXPECT_EQ(BoxView({2}, {5}), tester.mask_region());
}
TEST(WriteToMaskedArrayTest, RankTwo) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 1, 2, 0, 0},
{0, 3, 4, 0, 0},
{0, 5, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 2}, {3, 2}))
.value(),
MakeArray({
{7, 8},
{9, 0},
{1, 2},
})));
EXPECT_EQ(9, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 2}, {3, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{7, 8, 2, 0, 0},
{9, 0, 4, 0, 0},
{1, 2, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({3, 5}, {2, 2}))
.value(),
MakeArray({
{5, 6},
{7, 8},
})));
EXPECT_EQ(13, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 2}, {3, 5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({
{0, 0, 0, 0, 0},
{1, 1, 1, 0, 0},
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
}),
tester.mask_array());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{7, 8, 2, 0, 0},
{9, 0, 4, 5, 6},
{1, 2, 6, 7, 8},
}),
tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankTwoNonExactContainedInExistingMaskRegion) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 1, 2, 0, 0},
{0, 3, 4, 0, 0},
{0, 5, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(
tester.Write((tester.transform() |
Dims(0, 1).TranslateSizedInterval({2, 3}, {2, 2}, {2, 1}))
.value(),
MakeArray({
{7, 8},
{9, 0},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 7, 8, 0, 0},
{0, 3, 4, 0, 0},
{0, 9, 0, 0, 0},
}),
tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankTwoPartialCopy) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
EXPECT_THAT(
tester.Write((tester.transform() |
Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
}),
[](const int* source, int* dest, void* arg) {
if (*source == 4) return false;
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(WriteToMaskedArrayTest, RankTwoIndexArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 2},
{1, 4},
{2, 3},
})))
.value(),
MakeArray({1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {4, 5}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 0, 2, 0, 0},
{0, 3, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.mask_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 3},
{1, 4},
{2, 3},
})))
.value(),
MakeArray({4, 5, 6})));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {4, 5}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 4, 5, 0, 0},
{0, 6, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 1, 1, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.mask_array());
}
TEST(WriteToMaskedArrayTest, IndexArrayLarge) {
const Index kSize = 32768;
auto index_array = tensorstore::AllocateArray<Index>({kSize});
for (Index i = 0; i < kSize; ++i) {
index_array(i) = i;
}
auto fill_array =
tensorstore::BroadcastArray(tensorstore::MakeScalarArray<int>(42),
tensorstore::span<const Index>({2, kSize}))
.value();
auto mask_array =
tensorstore::BroadcastArray(tensorstore::MakeScalarArray<bool>(true),
tensorstore::span<const Index>({2, kSize}))
.value();
MaskedArrayWriteTester<int> tester{fill_array.domain()};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(1).OuterIndexArraySlice(index_array)).value(),
fill_array));
EXPECT_EQ(fill_array.num_elements(), tester.num_masked_elements());
EXPECT_EQ(fill_array.domain(), tester.mask_region());
EXPECT_EQ(fill_array, tester.dest_array());
EXPECT_EQ(mask_array, tester.mask_array());
}
TEST(WriteToMaskedArrayTest, RankOneInvalidTransform) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {4})};
EXPECT_THAT(
tester.Write((tester.transform() | Dims(0).SizedInterval(2, 3)).value(),
MakeOffsetArray({1}, {1, 2, 3})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({0, 0, 0, 0}), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOnePartialCopyDefaultError) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
EXPECT_THAT(
tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3}),
[](const int* source, int* dest, void* arg) {
if (*source == 2) return false;
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(WriteToMaskedArrayTest, RankOnePartialCopyCustomError) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
EXPECT_THAT(
tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3}),
[](const int* source, int* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 2) {
*status = absl::UnknownError("My custom error");
return false;
}
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "My custom error"));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(RebaseMaskedArrayTest, Empty) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
tester.Rebase(MakeArray({
{1, 2, 3},
{4, 5, 6},
}));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, Full) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({1, 2}, {2, 3}))
.value(),
MakeArray({
{1, 2, 3},
{4, 5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
tester.Rebase(MakeArray({
{7, 7, 7},
{7, 7, 7},
}));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, NoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {1, 2}))
.value(),
MakeArray({
{1, 2},
})));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {1, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0},
{0, 1, 2},
}),
tester.dest_array());
tester.Rebase(MakeArray({
{3, 4, 5},
{6, 7, 8},
}));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {1, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{3, 4, 5},
{6, 1, 2},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, MaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 2},
{1, 4},
})))
.value(),
MakeArray({1, 2})));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 0, 2},
{0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1},
{0, 0, 0},
}),
tester.mask_array());
tester.Rebase(MakeArray({
{3, 4, 5},
{6, 7, 8},
}));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 4, 2},
{6, 7, 8},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1},
{0, 0, 0},
}),
tester.mask_array());
}
TEST(UnionMasksTest, FirstEmpty) {
MaskedArrayTester tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, SecondEmpty) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayTester tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, MaskArrayAndMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).IndexArraySlice(MakeArray<Index>({1, 3})))
.value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).IndexArraySlice(MakeArray<Index>({1, 4})))
.value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 0}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {4}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, NoMaskArrayAndNoMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 2)).value(),
MakeArray({1, 2})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, NoMaskArrayAndNoMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 1, 0, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndNoMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, NoMaskArrayAndMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndNoMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(ResetTest, NoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(BoxView({4}, {2}), tester.mask_region());
EXPECT_EQ(2, tester.num_masked_elements());
tester.Reset();
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(ResetTest, MaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_EQ(2, tester.num_masked_elements());
tester.Reset();
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_EQ(0, tester.num_masked_elements());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/masked_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/masked_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e130505a-4fdf-4412-8ae6-7a6268b668b5 | cpp | google/quiche | quic_versions | quiche/quic/core/quic_versions.cc | quiche/quic/core/quic_versions_test.cc | #include "quiche/quic/core/quic_versions.h"
#include <algorithm>
#include <ostream>
#include <string>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_tag.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
namespace {
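// Returns a GREASE version label of the reserved-for-negotiation form
// 0x?a?a?a?a (RFC 9000, Section 15); the randomness can be disabled by flag
// so tests see a fixed label.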
QuicVersionLabel CreateRandomVersionLabelForNegotiation() {
QuicVersionLabel result;
if (!GetQuicFlag(quic_disable_version_negotiation_grease_randomness)) {
QuicRandom::GetInstance()->RandBytes(&result, sizeof(result));
} else {
result = MakeVersionLabel(0xd1, 0x57, 0x38, 0x3f);
}
result &= 0xf0f0f0f0;
result |= 0x0a0a0a0a;
return result;
}
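// Enables or disables the reloadable flag backing `version`. Some flags are
// phrased as "disable" flags, hence both the `enable` and `disable` locals
// below.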
void SetVersionFlag(const ParsedQuicVersion& version, bool should_enable) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
const bool enable = should_enable;
const bool disable = !should_enable;
if (version == ParsedQuicVersion::RFCv2()) {
SetQuicReloadableFlag(quic_enable_version_rfcv2, enable);
} else if (version == ParsedQuicVersion::RFCv1()) {
SetQuicReloadableFlag(quic_disable_version_rfcv1, disable);
} else if (version == ParsedQuicVersion::Draft29()) {
SetQuicReloadableFlag(quic_disable_version_draft_29, disable);
} else if (version == ParsedQuicVersion::Q046()) {
SetQuicReloadableFlag(quic_disable_version_q046, disable);
} else {
QUIC_BUG(quic_bug_10589_1)
<< "Cannot " << (enable ? "en" : "dis") << "able version " << version;
}
}
}
bool ParsedQuicVersion::IsKnown() const {
QUICHE_DCHECK(ParsedQuicVersionIsValid(handshake_protocol, transport_version))
<< QuicVersionToString(transport_version) << " "
<< HandshakeProtocolToString(handshake_protocol);
return transport_version != QUIC_VERSION_UNSUPPORTED;
}
bool ParsedQuicVersion::KnowsWhichDecrypterToUse() const {
QUICHE_DCHECK(IsKnown());
return transport_version > QUIC_VERSION_46;
}
bool ParsedQuicVersion::UsesInitialObfuscators() const {
QUICHE_DCHECK(IsKnown());
return transport_version > QUIC_VERSION_46;
}
bool ParsedQuicVersion::AllowsLowFlowControlLimits() const {
QUICHE_DCHECK(IsKnown());
return UsesHttp3();
}
bool ParsedQuicVersion::HasHeaderProtection() const {
QUICHE_DCHECK(IsKnown());
return transport_version > QUIC_VERSION_46;
}
bool ParsedQuicVersion::SupportsRetry() const {
QUICHE_DCHECK(IsKnown());
return transport_version > QUIC_VERSION_46;
}
bool ParsedQuicVersion::SendsVariableLengthPacketNumberInLongHeader() const {
QUICHE_DCHECK(IsKnown());
return transport_version > QUIC_VERSION_46;
}
bool ParsedQuicVersion::AllowsVariableLengthConnectionIds() const {
QUICHE_DCHECK(IsKnown());
return VersionAllowsVariableLengthConnectionIds(transport_version);
}
bool ParsedQuicVersion::SupportsClientConnectionIds() const {
QUICHE_DCHECK(IsKnown());
return transport_version > QUIC_VERSION_46;
}
bool ParsedQuicVersion::HasLengthPrefixedConnectionIds() const {
QUICHE_DCHECK(IsKnown());
return VersionHasLengthPrefixedConnectionIds(transport_version);
}
bool ParsedQuicVersion::SupportsAntiAmplificationLimit() const {
QUICHE_DCHECK(IsKnown());
return UsesHttp3();
}
bool ParsedQuicVersion::CanSendCoalescedPackets() const {
QUICHE_DCHECK(IsKnown());
return HasLongHeaderLengths() && UsesTls();
}
bool ParsedQuicVersion::SupportsGoogleAltSvcFormat() const {
QUICHE_DCHECK(IsKnown());
return VersionSupportsGoogleAltSvcFormat(transport_version);
}
bool ParsedQuicVersion::UsesHttp3() const {
QUICHE_DCHECK(IsKnown());
return VersionUsesHttp3(transport_version);
}
bool ParsedQuicVersion::HasLongHeaderLengths() const {
QUICHE_DCHECK(IsKnown());
return QuicVersionHasLongHeaderLengths(transport_version);
}
bool ParsedQuicVersion::UsesCryptoFrames() const {
QUICHE_DCHECK(IsKnown());
return QuicVersionUsesCryptoFrames(transport_version);
}
bool ParsedQuicVersion::HasIetfQuicFrames() const {
QUICHE_DCHECK(IsKnown());
return VersionHasIetfQuicFrames(transport_version);
}
bool ParsedQuicVersion::UsesLegacyTlsExtension() const {
QUICHE_DCHECK(IsKnown());
return UsesTls() && transport_version <= QUIC_VERSION_IETF_DRAFT_29;
}
bool ParsedQuicVersion::UsesTls() const {
QUICHE_DCHECK(IsKnown());
return handshake_protocol == PROTOCOL_TLS1_3;
}
bool ParsedQuicVersion::UsesQuicCrypto() const {
QUICHE_DCHECK(IsKnown());
return handshake_protocol == PROTOCOL_QUIC_CRYPTO;
}
bool ParsedQuicVersion::UsesV2PacketTypes() const {
QUICHE_DCHECK(IsKnown());
return transport_version == QUIC_VERSION_IETF_RFC_V2;
}
bool ParsedQuicVersion::AlpnDeferToRFCv1() const {
QUICHE_DCHECK(IsKnown());
return transport_version == QUIC_VERSION_IETF_RFC_V2;
}
bool VersionHasLengthPrefixedConnectionIds(
QuicTransportVersion transport_version) {
QUICHE_DCHECK(transport_version != QUIC_VERSION_UNSUPPORTED);
return transport_version > QUIC_VERSION_46;
}
std::ostream& operator<<(std::ostream& os, const ParsedQuicVersion& version) {
os << ParsedQuicVersionToString(version);
return os;
}
std::ostream& operator<<(std::ostream& os,
const ParsedQuicVersionVector& versions) {
os << ParsedQuicVersionVectorToString(versions);
return os;
}
QuicVersionLabel MakeVersionLabel(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
return MakeQuicTag(d, c, b, a);
}
std::ostream& operator<<(std::ostream& os,
const QuicVersionLabelVector& version_labels) {
os << QuicVersionLabelVectorToString(version_labels);
return os;
}
std::ostream& operator<<(std::ostream& os,
const QuicTransportVersionVector& transport_versions) {
os << QuicTransportVersionVectorToString(transport_versions);
return os;
}
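// Maps each supported version to its wire-format label; the
// reserved-for-negotiation version yields a fresh GREASE label on every call.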
QuicVersionLabel CreateQuicVersionLabel(ParsedQuicVersion parsed_version) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
if (parsed_version == ParsedQuicVersion::RFCv2()) {
return MakeVersionLabel(0x6b, 0x33, 0x43, 0xcf);
} else if (parsed_version == ParsedQuicVersion::RFCv1()) {
return MakeVersionLabel(0x00, 0x00, 0x00, 0x01);
} else if (parsed_version == ParsedQuicVersion::Draft29()) {
return MakeVersionLabel(0xff, 0x00, 0x00, 29);
} else if (parsed_version == ParsedQuicVersion::Q046()) {
return MakeVersionLabel('Q', '0', '4', '6');
} else if (parsed_version == ParsedQuicVersion::ReservedForNegotiation()) {
return CreateRandomVersionLabelForNegotiation();
}
QUIC_BUG(quic_bug_10589_2)
<< "Unsupported version "
<< QuicVersionToString(parsed_version.transport_version) << " "
<< HandshakeProtocolToString(parsed_version.handshake_protocol);
return 0;
}
QuicVersionLabelVector CreateQuicVersionLabelVector(
const ParsedQuicVersionVector& versions) {
QuicVersionLabelVector out;
out.reserve(versions.size());
for (const auto& version : versions) {
out.push_back(CreateQuicVersionLabel(version));
}
return out;
}
ParsedQuicVersionVector AllSupportedVersionsWithQuicCrypto() {
ParsedQuicVersionVector versions;
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
if (version.handshake_protocol == PROTOCOL_QUIC_CRYPTO) {
versions.push_back(version);
}
}
QUIC_BUG_IF(quic_bug_10589_3, versions.empty())
<< "No version with QUIC crypto found.";
return versions;
}
ParsedQuicVersionVector CurrentSupportedVersionsWithQuicCrypto() {
ParsedQuicVersionVector versions;
for (const ParsedQuicVersion& version : CurrentSupportedVersions()) {
if (version.handshake_protocol == PROTOCOL_QUIC_CRYPTO) {
versions.push_back(version);
}
}
QUIC_BUG_IF(quic_bug_10589_4, versions.empty())
<< "No version with QUIC crypto found.";
return versions;
}
ParsedQuicVersionVector AllSupportedVersionsWithTls() {
ParsedQuicVersionVector versions;
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
if (version.UsesTls()) {
versions.push_back(version);
}
}
QUIC_BUG_IF(quic_bug_10589_5, versions.empty())
<< "No version with TLS handshake found.";
return versions;
}
ParsedQuicVersionVector CurrentSupportedVersionsWithTls() {
ParsedQuicVersionVector versions;
for (const ParsedQuicVersion& version : CurrentSupportedVersions()) {
if (version.UsesTls()) {
versions.push_back(version);
}
}
QUIC_BUG_IF(quic_bug_10589_6, versions.empty())
<< "No version with TLS handshake found.";
return versions;
}
ParsedQuicVersionVector ObsoleteSupportedVersions() {
return ParsedQuicVersionVector{quic::ParsedQuicVersion::Q046(),
quic::ParsedQuicVersion::Draft29()};
}
bool IsObsoleteSupportedVersion(ParsedQuicVersion version) {
static const ParsedQuicVersionVector obsolete_versions =
ObsoleteSupportedVersions();
for (const ParsedQuicVersion& obsolete_version : obsolete_versions) {
if (version == obsolete_version) {
return true;
}
}
return false;
}
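// Clients are only offered TLS-based versions at RFC v1 or newer, i.e.
// exactly the supported versions not listed by ObsoleteSupportedVersions().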
ParsedQuicVersionVector CurrentSupportedVersionsForClients() {
ParsedQuicVersionVector versions;
for (const ParsedQuicVersion& version : CurrentSupportedVersionsWithTls()) {
QUICHE_DCHECK_EQ(version.handshake_protocol, PROTOCOL_TLS1_3);
if (version.transport_version >= QUIC_VERSION_IETF_RFC_V1) {
versions.push_back(version);
}
}
QUIC_BUG_IF(quic_bug_10589_8, versions.empty())
<< "No supported client versions found.";
return versions;
}
ParsedQuicVersionVector CurrentSupportedHttp3Versions() {
ParsedQuicVersionVector versions;
for (const ParsedQuicVersion& version : CurrentSupportedVersions()) {
if (version.UsesHttp3()) {
versions.push_back(version);
}
}
QUIC_BUG_IF(no_version_uses_http3, versions.empty())
<< "No version speaking Http3 found.";
return versions;
}
ParsedQuicVersion ParseQuicVersionLabel(QuicVersionLabel version_label) {
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
if (version_label == CreateQuicVersionLabel(version)) {
return version;
}
}
QUIC_DLOG(INFO) << "Unsupported QuicVersionLabel version: "
<< QuicVersionLabelToString(version_label);
return UnsupportedQuicVersion();
}
ParsedQuicVersionVector ParseQuicVersionLabelVector(
const QuicVersionLabelVector& version_labels) {
ParsedQuicVersionVector parsed_versions;
for (const QuicVersionLabel& version_label : version_labels) {
ParsedQuicVersion parsed_version = ParseQuicVersionLabel(version_label);
if (parsed_version.IsKnown()) {
parsed_versions.push_back(parsed_version);
}
}
return parsed_versions;
}
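// Accepts several spellings for a version: the canonical name ("RFCv1"),
// the ALPN token ("h3-29"; plain "h3" resolves to RFCv1 because RFCv2
// defers), the transport-version name ("QUIC_VERSION_46", QUIC crypto
// versions only), the hex wire label of an HTTP/3 version ("ff00001d"), or
// a bare transport version number ("46").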
ParsedQuicVersion ParseQuicVersionString(absl::string_view version_string) {
if (version_string.empty()) {
return UnsupportedQuicVersion();
}
const ParsedQuicVersionVector supported_versions = AllSupportedVersions();
for (const ParsedQuicVersion& version : supported_versions) {
if (version_string == ParsedQuicVersionToString(version) ||
(version_string == AlpnForVersion(version) &&
!version.AlpnDeferToRFCv1()) ||
(version.handshake_protocol == PROTOCOL_QUIC_CRYPTO &&
version_string == QuicVersionToString(version.transport_version))) {
return version;
}
}
for (const ParsedQuicVersion& version : supported_versions) {
if (version.UsesHttp3() &&
version_string ==
QuicVersionLabelToString(CreateQuicVersionLabel(version))) {
return version;
}
}
int quic_version_number = 0;
if (absl::SimpleAtoi(version_string, &quic_version_number) &&
quic_version_number > 0) {
QuicTransportVersion transport_version =
static_cast<QuicTransportVersion>(quic_version_number);
if (!ParsedQuicVersionIsValid(PROTOCOL_QUIC_CRYPTO, transport_version)) {
return UnsupportedQuicVersion();
}
ParsedQuicVersion version(PROTOCOL_QUIC_CRYPTO, transport_version);
if (std::find(supported_versions.begin(), supported_versions.end(),
version) != supported_versions.end()) {
return version;
}
return UnsupportedQuicVersion();
}
QUIC_DLOG(INFO) << "Unsupported QUIC version string: \"" << version_string
<< "\".";
return UnsupportedQuicVersion();
}
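// Parses a comma-separated list of version strings, trimming whitespace and
// silently dropping unknown versions and duplicates (first occurrence wins).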
ParsedQuicVersionVector ParseQuicVersionVectorString(
absl::string_view versions_string) {
ParsedQuicVersionVector versions;
std::vector<absl::string_view> version_strings =
absl::StrSplit(versions_string, ',');
for (absl::string_view version_string : version_strings) {
quiche::QuicheTextUtils::RemoveLeadingAndTrailingWhitespace(
&version_string);
ParsedQuicVersion version = ParseQuicVersionString(version_string);
if (!version.IsKnown() || std::find(versions.begin(), versions.end(),
version) != versions.end()) {
continue;
}
versions.push_back(version);
}
return versions;
}
QuicTransportVersionVector AllSupportedTransportVersions() {
QuicTransportVersionVector transport_versions;
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
if (std::find(transport_versions.begin(), transport_versions.end(),
version.transport_version) == transport_versions.end()) {
transport_versions.push_back(version.transport_version);
}
}
return transport_versions;
}
ParsedQuicVersionVector AllSupportedVersions() {
constexpr auto supported_versions = SupportedVersions();
return ParsedQuicVersionVector(supported_versions.begin(),
supported_versions.end());
}
ParsedQuicVersionVector CurrentSupportedVersions() {
return FilterSupportedVersions(AllSupportedVersions());
}
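// Drops versions disabled via reloadable flags. Note the asymmetry: RFCv2 is
// opt-in (enable flag) while the remaining versions are opt-out (disable
// flags); versions without flag protection trip a QUIC_BUG but are kept.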
ParsedQuicVersionVector FilterSupportedVersions(
ParsedQuicVersionVector versions) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
ParsedQuicVersionVector filtered_versions;
filtered_versions.reserve(versions.size());
for (const ParsedQuicVersion& version : versions) {
if (version == ParsedQuicVersion::RFCv2()) {
if (GetQuicReloadableFlag(quic_enable_version_rfcv2)) {
filtered_versions.push_back(version);
}
} else if (version == ParsedQuicVersion::RFCv1()) {
if (!GetQuicReloadableFlag(quic_disable_version_rfcv1)) {
filtered_versions.push_back(version);
}
} else if (version == ParsedQuicVersion::Draft29()) {
if (!GetQuicReloadableFlag(quic_disable_version_draft_29)) {
filtered_versions.push_back(version);
}
} else if (version == ParsedQuicVersion::Q046()) {
if (!GetQuicReloadableFlag(quic_disable_version_q046)) {
filtered_versions.push_back(version);
}
} else {
QUIC_BUG(quic_bug_10589_7)
<< "QUIC version " << version << " has no flag protection";
filtered_versions.push_back(version);
}
}
return filtered_versions;
}
ParsedQuicVersionVector ParsedVersionOfIndex(
const ParsedQuicVersionVector& versions, int index) {
ParsedQuicVersionVector version;
int version_count = versions.size();
if (index >= 0 && index < version_count) {
version.push_back(versions[index]);
} else {
version.push_back(UnsupportedQuicVersion());
}
return version;
}
std::string QuicVersionLabelToString(QuicVersionLabel version_label) {
return QuicTagToString(quiche::QuicheEndian::HostToNet32(version_label));
}
ParsedQuicVersion ParseQuicVersionLabelString(
absl::string_view version_label_string) {
const ParsedQuicVersionVector supported_versions = AllSupportedVersions();
for (const ParsedQuicVersion& version : supported_versions) {
if (version_label_string ==
QuicVersionLabelToString(CreateQuicVersionLabel(version))) {
return version;
}
}
return UnsupportedQuicVersion();
}
std::string QuicVersionLabelVectorToString(
const QuicVersionLabelVector& version_labels, const std::string& separator,
size_t skip_after_nth_version) {
std::string result;
for (size_t i = 0; i < version_labels.size(); ++i) {
if (i != 0) {
result.append(separator);
}
if (i > skip_after_nth_version) {
result.append("...");
break;
}
result.append(QuicVersionLabelToString(version_labels[i]));
}
return result;
}
#define RETURN_STRING_LITERAL(x) \
case x: \
return #x
std::string QuicVersionToString(QuicTransportVersion transport_version) {
switch (transport_version) {
RETURN_STRING_LITERAL(QUIC_VERSION_46);
RETURN_STRING_LITERAL(QUIC_VERSION_IETF_DRAFT_29);
RETURN_STRING_LITERAL(QUIC_VERSION_IETF_RFC_V1);
RETURN_STRING_LITERAL(QUIC_VERSION_IETF_RFC_V2);
RETURN_STRING_LITERAL(QUIC_VERSION_UNSUPPORTED);
RETURN_STRING_LITERAL(QUIC_VERSION_RESERVED_FOR_NEGOTIATION);
}
return absl::StrCat("QUIC_VERSION_UNKNOWN(",
static_cast<int>(transport_version), ")");
}
std::string HandshakeProtocolToString(HandshakeProtocol handshake_protocol) {
switch (handshake_protocol) {
RETURN_STRING_LITERAL(PROTOCOL_UNSUPPORTED);
RETURN_STRING_LITERAL(PROTOCOL_QUIC_CRYPTO);
RETURN_STRING_LITERAL(PROTOCOL_TLS1_3);
}
return absl::StrCat("PROTOCOL_UNKNOWN(", static_cast<int>(handshake_protocol),
")");
}
std::string ParsedQuicVersionToString(ParsedQuicVersion version) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
if (version == UnsupportedQuicVersion()) {
return "0";
} else if (version == ParsedQuicVersion::RFCv2()) {
QUICHE_DCHECK(version.UsesHttp3());
return "RFCv2";
} else if (version == ParsedQuicVersion::RFCv1()) {
QUICHE_DCHECK(version.UsesHttp3());
return "RFCv1";
} else if (version == ParsedQuicVersion::Draft29()) {
QUICHE_DCHECK(version.UsesHttp3());
return "draft29";
}
return QuicVersionLabelToString(CreateQuicVersionLabel(version));
}
std::string QuicTransportVersionVectorToString(
const QuicTransportVersionVector& versions) {
std::string result;
for (size_t i = 0; i < versions.size(); ++i) {
if (i != 0) {
result.append(",");
}
result.append(QuicVersionToString(versions[i]));
}
return result;
}
std::string ParsedQuicVersionVectorToString(
const ParsedQuicVersionVector& versions, const std::string& separator,
size_t skip_after_nth_version) {
std::string result;
for (size_t i = 0; i < versions.size(); ++i) {
if (i != 0) {
result.append(separator);
}
if (i > skip_after_nth_version) {
result.append("...");
break;
}
result.append(ParsedQuicVersionToString(versions[i]));
}
return result;
}
bool VersionSupportsGoogleAltSvcFormat(QuicTransportVersion transport_version) {
return transport_version <= QUIC_VERSION_46;
}
bool VersionAllowsVariableLengthConnectionIds(
QuicTransportVersion transport_version) {
QUICHE_DCHECK_NE(transport_version, QUIC_VERSION_UNSUPPORTED);
return transport_version > QUIC_VERSION_46;
}
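// Old version labels (Q043..Q048, T048, and IETF drafts 11-21) encoded the
// connection ID length in a 4-bit header field; this predicate presumably
// exists so parsers can still recognize such legacy long headers.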
bool QuicVersionLabelUses4BitConnectionIdLength(
QuicVersionLabel version_label) {
for (uint8_t c = '3'; c <= '8'; ++c) {
if (version_label == MakeVersionLabel('Q', '0', '4', c)) {
return true;
}
}
if (version_label == MakeVersionLabel('T', '0', '4', '8')) {
return true;
}
for (uint8_t draft_number = 11; draft_number <= 21; ++draft_number) {
if (version_label == MakeVersionLabel(0xff, 0x00, 0x00, draft_number)) {
return true;
}
}
return false;
}
ParsedQuicVersion UnsupportedQuicVersion() {
return ParsedQuicVersion::Unsupported();
}
ParsedQuicVersion QuicVersionReservedForNegotiation() {
return ParsedQuicVersion::ReservedForNegotiation();
}
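// Both RFC versions advertise the "h3" ALPN token; AlpnDeferToRFCv1() above
// disambiguates in favor of RFCv1 when parsing strings. Other versions use
// "h3-" plus the version string, e.g. "h3-Q046".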
std::string AlpnForVersion(ParsedQuicVersion parsed_version) {
if (parsed_version == ParsedQuicVersion::RFCv2()) {
return "h3";
} else if (parsed_version == ParsedQuicVersion::RFCv1()) {
return "h3";
} else if (parsed_version == ParsedQuicVersion::Draft29()) {
return "h3-29";
}
return "h3-" + ParsedQuicVersionToString(parsed_version);
}
void QuicEnableVersion(const ParsedQuicVersion& version) {
SetVersionFlag(version, true);
}
void QuicDisableVersion(const ParsedQuicVersion& version) {
SetVersionFlag(version, false);
}
bool QuicVersionIsEnabled(const ParsedQuicVersion& version) {
ParsedQuicVersionVector current = CurrentSupportedVersions();
return std::find(current.begin(), current.end(), version) != current.end();
}
#undef RETURN_STRING_LITERAL
} | #include "quiche/quic/core/quic_versions.h"
#include <cstddef>
#include <sstream>
#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
TEST(QuicVersionsTest, CreateQuicVersionLabelUnsupported) {
EXPECT_QUIC_BUG(
CreateQuicVersionLabel(UnsupportedQuicVersion()),
"Unsupported version QUIC_VERSION_UNSUPPORTED PROTOCOL_UNSUPPORTED");
}
TEST(QuicVersionsTest, KnownAndValid) {
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
EXPECT_TRUE(version.IsKnown());
EXPECT_TRUE(ParsedQuicVersionIsValid(version.handshake_protocol,
version.transport_version));
}
ParsedQuicVersion unsupported = UnsupportedQuicVersion();
EXPECT_FALSE(unsupported.IsKnown());
EXPECT_TRUE(ParsedQuicVersionIsValid(unsupported.handshake_protocol,
unsupported.transport_version));
ParsedQuicVersion reserved = QuicVersionReservedForNegotiation();
EXPECT_TRUE(reserved.IsKnown());
EXPECT_TRUE(ParsedQuicVersionIsValid(reserved.handshake_protocol,
reserved.transport_version));
EXPECT_FALSE(ParsedQuicVersionIsValid(PROTOCOL_TLS1_3, QUIC_VERSION_46));
EXPECT_FALSE(ParsedQuicVersionIsValid(PROTOCOL_QUIC_CRYPTO,
QUIC_VERSION_IETF_DRAFT_29));
EXPECT_FALSE(ParsedQuicVersionIsValid(PROTOCOL_QUIC_CRYPTO,
static_cast<QuicTransportVersion>(33)));
EXPECT_FALSE(ParsedQuicVersionIsValid(PROTOCOL_QUIC_CRYPTO,
static_cast<QuicTransportVersion>(99)));
EXPECT_FALSE(ParsedQuicVersionIsValid(PROTOCOL_TLS1_3,
static_cast<QuicTransportVersion>(99)));
}
TEST(QuicVersionsTest, Features) {
ParsedQuicVersion parsed_version_q046 = ParsedQuicVersion::Q046();
ParsedQuicVersion parsed_version_draft_29 = ParsedQuicVersion::Draft29();
EXPECT_TRUE(parsed_version_q046.IsKnown());
EXPECT_FALSE(parsed_version_q046.KnowsWhichDecrypterToUse());
EXPECT_FALSE(parsed_version_q046.UsesInitialObfuscators());
EXPECT_FALSE(parsed_version_q046.AllowsLowFlowControlLimits());
EXPECT_FALSE(parsed_version_q046.HasHeaderProtection());
EXPECT_FALSE(parsed_version_q046.SupportsRetry());
EXPECT_FALSE(
parsed_version_q046.SendsVariableLengthPacketNumberInLongHeader());
EXPECT_FALSE(parsed_version_q046.AllowsVariableLengthConnectionIds());
EXPECT_FALSE(parsed_version_q046.SupportsClientConnectionIds());
EXPECT_FALSE(parsed_version_q046.HasLengthPrefixedConnectionIds());
EXPECT_FALSE(parsed_version_q046.SupportsAntiAmplificationLimit());
EXPECT_FALSE(parsed_version_q046.CanSendCoalescedPackets());
EXPECT_TRUE(parsed_version_q046.SupportsGoogleAltSvcFormat());
EXPECT_FALSE(parsed_version_q046.UsesHttp3());
EXPECT_FALSE(parsed_version_q046.HasLongHeaderLengths());
EXPECT_FALSE(parsed_version_q046.UsesCryptoFrames());
EXPECT_FALSE(parsed_version_q046.HasIetfQuicFrames());
EXPECT_FALSE(parsed_version_q046.UsesTls());
EXPECT_TRUE(parsed_version_q046.UsesQuicCrypto());
EXPECT_TRUE(parsed_version_draft_29.IsKnown());
EXPECT_TRUE(parsed_version_draft_29.KnowsWhichDecrypterToUse());
EXPECT_TRUE(parsed_version_draft_29.UsesInitialObfuscators());
EXPECT_TRUE(parsed_version_draft_29.AllowsLowFlowControlLimits());
EXPECT_TRUE(parsed_version_draft_29.HasHeaderProtection());
EXPECT_TRUE(parsed_version_draft_29.SupportsRetry());
EXPECT_TRUE(
parsed_version_draft_29.SendsVariableLengthPacketNumberInLongHeader());
EXPECT_TRUE(parsed_version_draft_29.AllowsVariableLengthConnectionIds());
EXPECT_TRUE(parsed_version_draft_29.SupportsClientConnectionIds());
EXPECT_TRUE(parsed_version_draft_29.HasLengthPrefixedConnectionIds());
EXPECT_TRUE(parsed_version_draft_29.SupportsAntiAmplificationLimit());
EXPECT_TRUE(parsed_version_draft_29.CanSendCoalescedPackets());
EXPECT_FALSE(parsed_version_draft_29.SupportsGoogleAltSvcFormat());
EXPECT_TRUE(parsed_version_draft_29.UsesHttp3());
EXPECT_TRUE(parsed_version_draft_29.HasLongHeaderLengths());
EXPECT_TRUE(parsed_version_draft_29.UsesCryptoFrames());
EXPECT_TRUE(parsed_version_draft_29.HasIetfQuicFrames());
EXPECT_TRUE(parsed_version_draft_29.UsesTls());
EXPECT_FALSE(parsed_version_draft_29.UsesQuicCrypto());
}
TEST(QuicVersionsTest, ParseQuicVersionLabel) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
EXPECT_EQ(ParsedQuicVersion::Q046(),
ParseQuicVersionLabel(MakeVersionLabel('Q', '0', '4', '6')));
EXPECT_EQ(ParsedQuicVersion::Draft29(),
ParseQuicVersionLabel(MakeVersionLabel(0xff, 0x00, 0x00, 0x1d)));
EXPECT_EQ(ParsedQuicVersion::RFCv1(),
ParseQuicVersionLabel(MakeVersionLabel(0x00, 0x00, 0x00, 0x01)));
EXPECT_EQ(ParsedQuicVersion::RFCv2(),
ParseQuicVersionLabel(MakeVersionLabel(0x6b, 0x33, 0x43, 0xcf)));
EXPECT_EQ((ParsedQuicVersionVector{ParsedQuicVersion::RFCv2(),
ParsedQuicVersion::RFCv1(),
ParsedQuicVersion::Draft29()}),
ParseQuicVersionLabelVector(QuicVersionLabelVector{
MakeVersionLabel(0x6b, 0x33, 0x43, 0xcf),
MakeVersionLabel(0x00, 0x00, 0x00, 0x01),
MakeVersionLabel(0xaa, 0xaa, 0xaa, 0xaa),
MakeVersionLabel(0xff, 0x00, 0x00, 0x1d)}));
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
EXPECT_EQ(version, ParseQuicVersionLabel(CreateQuicVersionLabel(version)));
}
}
TEST(QuicVersionsTest, ParseQuicVersionString) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
EXPECT_EQ(ParsedQuicVersion::Q046(),
ParseQuicVersionString("QUIC_VERSION_46"));
EXPECT_EQ(ParsedQuicVersion::Q046(), ParseQuicVersionString("46"));
EXPECT_EQ(ParsedQuicVersion::Q046(), ParseQuicVersionString("Q046"));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionString(""));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionString("Q 46"));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionString("Q046 "));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionString("99"));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionString("70"));
EXPECT_EQ(ParsedQuicVersion::Draft29(), ParseQuicVersionString("ff00001d"));
EXPECT_EQ(ParsedQuicVersion::Draft29(), ParseQuicVersionString("draft29"));
EXPECT_EQ(ParsedQuicVersion::Draft29(), ParseQuicVersionString("h3-29"));
EXPECT_EQ(ParsedQuicVersion::RFCv1(), ParseQuicVersionString("00000001"));
EXPECT_EQ(ParsedQuicVersion::RFCv1(), ParseQuicVersionString("h3"));
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
EXPECT_EQ(version,
ParseQuicVersionString(ParsedQuicVersionToString(version)));
EXPECT_EQ(version, ParseQuicVersionString(QuicVersionLabelToString(
CreateQuicVersionLabel(version))));
if (!version.AlpnDeferToRFCv1()) {
EXPECT_EQ(version, ParseQuicVersionString(AlpnForVersion(version)));
}
}
}
TEST(QuicVersionsTest, ParseQuicVersionVectorString) {
ParsedQuicVersion version_q046 = ParsedQuicVersion::Q046();
ParsedQuicVersion version_draft_29 = ParsedQuicVersion::Draft29();
EXPECT_THAT(ParseQuicVersionVectorString(""), IsEmpty());
EXPECT_THAT(ParseQuicVersionVectorString("QUIC_VERSION_46"),
ElementsAre(version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("h3-Q046"),
ElementsAre(version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("h3-Q046, h3-29"),
ElementsAre(version_q046, version_draft_29));
EXPECT_THAT(ParseQuicVersionVectorString("h3-29,h3-Q046,h3-29"),
ElementsAre(version_draft_29, version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("h3-29, h3-Q046"),
ElementsAre(version_draft_29, version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("QUIC_VERSION_46,h3-29"),
ElementsAre(version_q046, version_draft_29));
EXPECT_THAT(ParseQuicVersionVectorString("h3-29,QUIC_VERSION_46"),
ElementsAre(version_draft_29, version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("QUIC_VERSION_46, h3-29"),
ElementsAre(version_q046, version_draft_29));
EXPECT_THAT(ParseQuicVersionVectorString("h3-29, QUIC_VERSION_46"),
ElementsAre(version_draft_29, version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("h3-29,QUIC_VERSION_46"),
ElementsAre(version_draft_29, version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("QUIC_VERSION_46,h3-29"),
ElementsAre(version_q046, version_draft_29));
EXPECT_THAT(ParseQuicVersionVectorString("QUIC_VERSION_46, QUIC_VERSION_46"),
ElementsAre(version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("h3-Q046, h3-Q046"),
ElementsAre(version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("h3-Q046, QUIC_VERSION_46"),
ElementsAre(version_q046));
EXPECT_THAT(ParseQuicVersionVectorString(
"QUIC_VERSION_46, h3-Q046, QUIC_VERSION_46, h3-Q046"),
ElementsAre(version_q046));
EXPECT_THAT(ParseQuicVersionVectorString("QUIC_VERSION_46, h3-29, h3-Q046"),
ElementsAre(version_q046, version_draft_29));
EXPECT_THAT(ParseQuicVersionVectorString("99"), IsEmpty());
EXPECT_THAT(ParseQuicVersionVectorString("70"), IsEmpty());
EXPECT_THAT(ParseQuicVersionVectorString("h3-01"), IsEmpty());
EXPECT_THAT(ParseQuicVersionVectorString("h3-01,h3-29"),
ElementsAre(version_draft_29));
}
TEST(QuicVersionsTest, CreateQuicVersionLabel) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
EXPECT_EQ(0x51303436u, CreateQuicVersionLabel(ParsedQuicVersion::Q046()));
EXPECT_EQ(0xff00001du, CreateQuicVersionLabel(ParsedQuicVersion::Draft29()));
EXPECT_EQ(0x00000001u, CreateQuicVersionLabel(ParsedQuicVersion::RFCv1()));
EXPECT_EQ(0x6b3343cfu, CreateQuicVersionLabel(ParsedQuicVersion::RFCv2()));
EXPECT_EQ(
0xda5a3a3au & 0x0f0f0f0f,
CreateQuicVersionLabel(ParsedQuicVersion::ReservedForNegotiation()) &
0x0f0f0f0f);
SetQuicFlag(quic_disable_version_negotiation_grease_randomness, true);
EXPECT_EQ(0xda5a3a3au, CreateQuicVersionLabel(
ParsedQuicVersion::ReservedForNegotiation()));
}
TEST(QuicVersionsTest, QuicVersionLabelToString) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
EXPECT_EQ("Q046", QuicVersionLabelToString(
CreateQuicVersionLabel(ParsedQuicVersion::Q046())));
EXPECT_EQ("ff00001d", QuicVersionLabelToString(CreateQuicVersionLabel(
ParsedQuicVersion::Draft29())));
EXPECT_EQ("00000001", QuicVersionLabelToString(CreateQuicVersionLabel(
ParsedQuicVersion::RFCv1())));
EXPECT_EQ("6b3343cf", QuicVersionLabelToString(CreateQuicVersionLabel(
ParsedQuicVersion::RFCv2())));
QuicVersionLabelVector version_labels = {
MakeVersionLabel('Q', '0', '3', '5'),
MakeVersionLabel('T', '0', '3', '8'),
MakeVersionLabel(0xff, 0, 0, 7),
};
EXPECT_EQ("Q035", QuicVersionLabelToString(version_labels[0]));
EXPECT_EQ("T038", QuicVersionLabelToString(version_labels[1]));
EXPECT_EQ("ff000007", QuicVersionLabelToString(version_labels[2]));
EXPECT_EQ("Q035,T038,ff000007",
QuicVersionLabelVectorToString(version_labels));
EXPECT_EQ("Q035:T038:ff000007",
QuicVersionLabelVectorToString(version_labels, ":", 2));
EXPECT_EQ("Q035|T038|...",
QuicVersionLabelVectorToString(version_labels, "|", 1));
std::ostringstream os;
os << version_labels;
EXPECT_EQ("Q035,T038,ff000007", os.str());
}
TEST(QuicVersionsTest, ParseQuicVersionLabelString) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
EXPECT_EQ(ParsedQuicVersion::Q046(), ParseQuicVersionLabelString("Q046"));
EXPECT_EQ(ParsedQuicVersion::Draft29(),
ParseQuicVersionLabelString("ff00001d"));
EXPECT_EQ(ParsedQuicVersion::RFCv1(),
ParseQuicVersionLabelString("00000001"));
EXPECT_EQ(ParsedQuicVersion::RFCv2(),
ParseQuicVersionLabelString("6b3343cf"));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionLabelString("1"));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionLabelString("46"));
EXPECT_EQ(UnsupportedQuicVersion(),
ParseQuicVersionLabelString("QUIC_VERSION_46"));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionLabelString("h3"));
EXPECT_EQ(UnsupportedQuicVersion(), ParseQuicVersionLabelString("h3-29"));
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
EXPECT_EQ(version, ParseQuicVersionLabelString(QuicVersionLabelToString(
CreateQuicVersionLabel(version))));
}
}
TEST(QuicVersionsTest, QuicVersionToString) {
EXPECT_EQ("QUIC_VERSION_UNSUPPORTED",
QuicVersionToString(QUIC_VERSION_UNSUPPORTED));
QuicTransportVersion single_version[] = {QUIC_VERSION_46};
QuicTransportVersionVector versions_vector;
for (size_t i = 0; i < ABSL_ARRAYSIZE(single_version); ++i) {
versions_vector.push_back(single_version[i]);
}
EXPECT_EQ("QUIC_VERSION_46",
QuicTransportVersionVectorToString(versions_vector));
QuicTransportVersion multiple_versions[] = {QUIC_VERSION_UNSUPPORTED,
QUIC_VERSION_46};
versions_vector.clear();
for (size_t i = 0; i < ABSL_ARRAYSIZE(multiple_versions); ++i) {
versions_vector.push_back(multiple_versions[i]);
}
EXPECT_EQ("QUIC_VERSION_UNSUPPORTED,QUIC_VERSION_46",
QuicTransportVersionVectorToString(versions_vector));
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
EXPECT_NE("QUIC_VERSION_UNSUPPORTED",
QuicVersionToString(version.transport_version));
}
std::ostringstream os;
os << versions_vector;
EXPECT_EQ("QUIC_VERSION_UNSUPPORTED,QUIC_VERSION_46", os.str());
}
TEST(QuicVersionsTest, ParsedQuicVersionToString) {
EXPECT_EQ("0", ParsedQuicVersionToString(ParsedQuicVersion::Unsupported()));
EXPECT_EQ("Q046", ParsedQuicVersionToString(ParsedQuicVersion::Q046()));
EXPECT_EQ("draft29", ParsedQuicVersionToString(ParsedQuicVersion::Draft29()));
EXPECT_EQ("RFCv1", ParsedQuicVersionToString(ParsedQuicVersion::RFCv1()));
EXPECT_EQ("RFCv2", ParsedQuicVersionToString(ParsedQuicVersion::RFCv2()));
ParsedQuicVersionVector versions_vector = {ParsedQuicVersion::Q046()};
EXPECT_EQ("Q046", ParsedQuicVersionVectorToString(versions_vector));
versions_vector = {ParsedQuicVersion::Unsupported(),
ParsedQuicVersion::Q046()};
EXPECT_EQ("0,Q046", ParsedQuicVersionVectorToString(versions_vector));
EXPECT_EQ("0:Q046", ParsedQuicVersionVectorToString(versions_vector, ":",
versions_vector.size()));
EXPECT_EQ("0|...", ParsedQuicVersionVectorToString(versions_vector, "|", 0));
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
EXPECT_NE("0", ParsedQuicVersionToString(version));
}
std::ostringstream os;
os << versions_vector;
EXPECT_EQ("0,Q046", os.str());
}
TEST(QuicVersionsTest, FilterSupportedVersionsAllVersions) {
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
QuicEnableVersion(version);
}
ParsedQuicVersionVector expected_parsed_versions;
for (const ParsedQuicVersion& version : SupportedVersions()) {
expected_parsed_versions.push_back(version);
}
EXPECT_EQ(expected_parsed_versions,
FilterSupportedVersions(AllSupportedVersions()));
EXPECT_EQ(expected_parsed_versions, AllSupportedVersions());
}
TEST(QuicVersionsTest, FilterSupportedVersionsWithoutFirstVersion) {
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
QuicEnableVersion(version);
}
QuicDisableVersion(AllSupportedVersions().front());
ParsedQuicVersionVector expected_parsed_versions;
for (const ParsedQuicVersion& version : SupportedVersions()) {
expected_parsed_versions.push_back(version);
}
expected_parsed_versions.erase(expected_parsed_versions.begin());
EXPECT_EQ(expected_parsed_versions,
FilterSupportedVersions(AllSupportedVersions()));
}
TEST(QuicVersionsTest, LookUpParsedVersionByIndex) {
ParsedQuicVersionVector all_versions = AllSupportedVersions();
int version_count = all_versions.size();
for (int i = -5; i <= version_count + 1; ++i) {
ParsedQuicVersionVector index = ParsedVersionOfIndex(all_versions, i);
if (i >= 0 && i < version_count) {
EXPECT_EQ(all_versions[i], index[0]);
} else {
EXPECT_EQ(UnsupportedQuicVersion(), index[0]);
}
}
}
TEST(QuicVersionsTest, CheckTransportVersionNumbersForTypos) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
EXPECT_EQ(QUIC_VERSION_46, 46);
EXPECT_EQ(QUIC_VERSION_IETF_DRAFT_29, 73);
EXPECT_EQ(QUIC_VERSION_IETF_RFC_V1, 80);
EXPECT_EQ(QUIC_VERSION_IETF_RFC_V2, 82);
}
TEST(QuicVersionsTest, AlpnForVersion) {
static_assert(SupportedVersions().size() == 4u,
"Supported versions out of sync");
EXPECT_EQ("h3-Q046", AlpnForVersion(ParsedQuicVersion::Q046()));
EXPECT_EQ("h3-29", AlpnForVersion(ParsedQuicVersion::Draft29()));
EXPECT_EQ("h3", AlpnForVersion(ParsedQuicVersion::RFCv1()));
EXPECT_EQ("h3", AlpnForVersion(ParsedQuicVersion::RFCv2()));
}
TEST(QuicVersionsTest, QuicVersionEnabling) {
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
QuicFlagSaver flag_saver;
QuicDisableVersion(version);
EXPECT_FALSE(QuicVersionIsEnabled(version));
QuicEnableVersion(version);
EXPECT_TRUE(QuicVersionIsEnabled(version));
}
}
TEST(QuicVersionsTest, ReservedForNegotiation) {
EXPECT_EQ(QUIC_VERSION_RESERVED_FOR_NEGOTIATION,
QuicVersionReservedForNegotiation().transport_version);
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
EXPECT_NE(QUIC_VERSION_RESERVED_FOR_NEGOTIATION, version.transport_version);
}
}
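// Reconstructs the expected ordering of SupportedVersions() by scanning
// transport versions from high to low within each handshake protocol.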
TEST(QuicVersionsTest, SupportedVersionsHasCorrectList) {
size_t index = 0;
for (HandshakeProtocol handshake_protocol : SupportedHandshakeProtocols()) {
for (int trans_vers = 255; trans_vers > 0; trans_vers--) {
QuicTransportVersion transport_version =
static_cast<QuicTransportVersion>(trans_vers);
SCOPED_TRACE(index);
if (ParsedQuicVersionIsValid(handshake_protocol, transport_version)) {
ParsedQuicVersion version = SupportedVersions()[index];
EXPECT_EQ(version,
ParsedQuicVersion(handshake_protocol, transport_version));
index++;
}
}
}
EXPECT_EQ(SupportedVersions().size(), index);
}
TEST(QuicVersionsTest, SupportedVersionsAllDistinct) {
for (size_t index1 = 0; index1 < SupportedVersions().size(); ++index1) {
ParsedQuicVersion version1 = SupportedVersions()[index1];
for (size_t index2 = index1 + 1; index2 < SupportedVersions().size();
++index2) {
ParsedQuicVersion version2 = SupportedVersions()[index2];
EXPECT_NE(version1, version2) << version1 << " " << version2;
EXPECT_NE(CreateQuicVersionLabel(version1),
CreateQuicVersionLabel(version2))
<< version1 << " " << version2;
if ((version1 != ParsedQuicVersion::RFCv2()) &&
(version2 != ParsedQuicVersion::RFCv1())) {
EXPECT_NE(AlpnForVersion(version1), AlpnForVersion(version2))
<< version1 << " " << version2;
}
}
}
}
TEST(QuicVersionsTest, CurrentSupportedHttp3Versions) {
ParsedQuicVersionVector h3_versions = CurrentSupportedHttp3Versions();
ParsedQuicVersionVector all_current_supported_versions =
CurrentSupportedVersions();
for (auto& version : all_current_supported_versions) {
bool version_is_h3 = false;
for (auto& h3_version : h3_versions) {
if (version == h3_version) {
EXPECT_TRUE(version.UsesHttp3());
version_is_h3 = true;
break;
}
}
if (!version_is_h3) {
EXPECT_FALSE(version.UsesHttp3());
}
}
}
TEST(QuicVersionsTest, ObsoleteSupportedVersions) {
ParsedQuicVersionVector obsolete_versions = ObsoleteSupportedVersions();
EXPECT_EQ(quic::ParsedQuicVersion::Q046(), obsolete_versions[0]);
EXPECT_EQ(quic::ParsedQuicVersion::Draft29(), obsolete_versions[1]);
}
TEST(QuicVersionsTest, IsObsoleteSupportedVersion) {
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
bool is_obsolete = version.handshake_protocol != PROTOCOL_TLS1_3 ||
version.transport_version < QUIC_VERSION_IETF_RFC_V1;
EXPECT_EQ(is_obsolete, IsObsoleteSupportedVersion(version));
}
}
TEST(QuicVersionsTest, CurrentSupportedVersionsForClients) {
ParsedQuicVersionVector supported_versions = CurrentSupportedVersions();
ParsedQuicVersionVector client_versions =
CurrentSupportedVersionsForClients();
for (auto& version : supported_versions) {
const bool is_obsolete = IsObsoleteSupportedVersion(version);
const bool is_supported =
absl::c_find(client_versions, version) != client_versions.end();
EXPECT_EQ(!is_obsolete, is_supported);
}
for (auto& version : client_versions) {
EXPECT_TRUE(absl::c_find(supported_versions, version) !=
supported_versions.end());
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_versions.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_versions_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
fc72ca28-1a60-4ddb-be48-f5dd209aa2ef | cpp | tensorflow/tensorflow | ifrt_backend | third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend.cc | third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend_test.cc | #include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/program_serdes.h"
#include "xla/python/ifrt/remap_plan.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/value.h"
#include "xla/python/ifrt_proxy/common/array_util.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/proto_util.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/host_callback.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace ifrt {
namespace proxy {
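// The backend owns a dedicated compile thread pool (32 threads, 240 KiB
// stacks) so that slow Compile() calls do not block the threads servicing
// other proxy requests.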
IfrtBackend::IfrtBackend(IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store)
: version_(std::move(version)),
session_id_(session_id),
client_(std::move(ifrt_client)),
host_buffer_store_(std::move(host_buffer_store)),
compile_thread_pool_(
tsl::Env::Default(),
[]() {
tsl::ThreadOptions options;
options.stack_size = 240 * 1024;
return options;
}(),
"IfrtBackend",
32) {}
absl::StatusOr<std::unique_ptr<IfrtBackend>> IfrtBackend::Create(
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store) {
if (ifrt_client == nullptr) {
return absl::InvalidArgumentError("ifrt_client cannot be a nullptr.");
}
if (version.protocol_version() < kServerMinVersion ||
version.protocol_version() > kServerMaxVersion) {
return absl::FailedPreconditionError(absl::StrCat(
"Protocol version ", version.protocol_version(),
" is unsupported by IFRT Proxy server; supported versions: [",
kServerMinVersion, ",", kServerMaxVersion, "]"));
}
return absl::WrapUnique<IfrtBackend>(
new IfrtBackend(std::move(version), session_id, std::move(ifrt_client),
std::move(host_buffer_store)));
}
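// Shutdown sequence: close every host-callback queue, fail all outstanding
// host-callback executions with CancelledError, then block until every
// request scheduled through AsyncExecute() has drained.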
IfrtBackend::~IfrtBackend() {
{
absl::MutexLock lock(&host_callback_queues_mutex_);
for (const auto& [key, queue] : host_callback_queues_) {
queue->Close();
}
}
absl::flat_hash_map<uint64_t, RemoteLoadedHostCallbackQueue::ExecutionRequest>
host_callback_executions;
{
absl::MutexLock lock(&host_callback_executions_mutex_);
host_callback_executions.swap(host_callback_executions_);
}
for (auto& [handle, execution_request] : host_callback_executions) {
std::move(execution_request)
.status.Set(absl::CancelledError("IFRT backend has shut down"));
}
{
auto done = [this]() ABSL_SHARED_LOCKS_REQUIRED(in_flight_count_mutex_) {
return in_flight_count_ == 0;
};
absl::MutexLock lock(&in_flight_count_mutex_, absl::Condition(&done));
}
}
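// Single dispatch point for all proxy requests. Handlers that return
// absl::StatusOr<Response> are wrapped into already-available Futures;
// inherently asynchronous handlers return their own Futures.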
Future<BackendInterface::Response> IfrtBackend::Process(
std::unique_ptr<IfrtRequest> request) {
switch (request->request_case()) {
case IfrtRequest::RequestCase::kInitRequest:
return Future<Response>(HandleInit(std::move(request)));
case IfrtRequest::RequestCase::kCheckFutureRequest:
return HandleCheckFutureRequest(std::move(request));
case IfrtRequest::RequestCase::kMakeArrayFromHostBufferRequest:
return Future<Response>(
HandleMakeArrayFromHostBufferRequest(std::move(request)));
case IfrtRequest::RequestCase::kAssembleArrayFromSingleDeviceArraysRequest:
return Future<Response>(
HandleAssembleArrayFromSingleDeviceArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kRemapArraysRequest:
return Future<Response>(HandleRemapArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kCopyToHostBufferRequest:
return HandleCopyToHostBufferRequest(std::move(request));
case IfrtRequest::RequestCase::kDisassembleIntoSingleDeviceArraysRequest:
return Future<Response>(
HandleDisassembleIntoSingleDeviceArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kCheckValueReadyRequest:
return Future<Response>(HandleCheckValueReadyRequest(std::move(request)));
case IfrtRequest::RequestCase::kCopyArraysRequest:
return Future<Response>(HandleCopyArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kReshardRequest:
return Future<Response>(HandleReshardRequest(std::move(request)));
case IfrtRequest::RequestCase::kFullyReplicatedShardRequest:
return Future<Response>(
HandleFullyReplicatedShardRequest(std::move(request)));
case IfrtRequest::RequestCase::kDeleteArrayRequest:
return Future<Response>(HandleDeleteArrayRequest(std::move(request)));
case IfrtRequest::RequestCase::kIsArrayDeletedRequest:
return Future<Response>(HandleIsArrayDeletedRequest(std::move(request)));
case IfrtRequest::RequestCase::kDestructArrayRequest:
return Future<Response>(HandleDestructArrayRequest(std::move(request)));
case IfrtRequest::RequestCase::kCompileRequest:
return Future<Response>(HandleCompileRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableMetadataRequest:
return HandleLoadedExecutableMetadataRequest(std::move(request));
case IfrtRequest::RequestCase::kLoadedExecutableExecuteRequest:
return Future<Response>(
HandleLoadedExecutableExecuteRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableDeleteRequest:
return Future<Response>(
HandleLoadedExecutableDeleteRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableIsDeletedRequest:
return Future<Response>(
HandleLoadedExecutableIsDeletedRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableDestructRequest:
return Future<Response>(
HandleLoadedExecutableDestructRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedHostCallbackPollRequest:
return HandleLoadedHostCallbackPollRequest(std::move(request));
case IfrtRequest::RequestCase::kLoadedHostCallbackReturnRequest:
return Future<Response>(
HandleLoadedHostCallbackReturnRequest(std::move(request)));
case IfrtRequest::RequestCase::kGetDefaultDeviceAssignmentRequest:
return Future<Response>(
HandleGetDefaultDeviceAssignmentRequest(std::move(request)));
default:
LOG(ERROR) << "Got unimplemented request type: "
<< request->DebugString();
return Future<Response>(absl::UnimplementedError(absl::StrCat(
"Got unimplemented request type: ", request->request_case())));
}
}
uint64_t IfrtBackend::HandleGenerator::New() {
absl::MutexLock lock(&mu_);
return current_++;
}
void IfrtBackend::HandleGenerator::BulkNew(absl::Span<uint64_t> handles) {
absl::MutexLock lock(&mu_);
std::iota(handles.begin(), handles.end(), current_);
current_ += handles.size();
}
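// Runs `handle_fn` on `thread_pool` (or the default Env scheduler when it is
// null) and fulfills the returned Future with the result. The in-flight
// counter is what the destructor waits on before completing shutdown.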
Future<BackendInterface::Response> IfrtBackend::AsyncExecute(
std::function<absl::StatusOr<Response>()> handle_fn,
tsl::thread::ThreadPool* thread_pool) {
{
absl::MutexLock lock(&in_flight_count_mutex_);
++in_flight_count_;
}
auto promise = Future<Response>::CreatePromise();
auto f = [this, promise, handle_fn = std::move(handle_fn)]() mutable {
promise.Set(handle_fn());
{
absl::MutexLock lock(&in_flight_count_mutex_);
--in_flight_count_;
}
};
if (thread_pool != nullptr) {
thread_pool->Schedule(std::move(f));
} else {
tsl::Env::Default()->SchedClosure(std::move(f));
}
return Future<Response>(std::move(promise));
}
absl::StatusOr<BackendInterface::Response> IfrtBackend::HandleInit(
std::unique_ptr<IfrtRequest> request) {
std::unique_ptr<IfrtResponse> response =
NewIfrtResponse(request->request_metadata().op_id());
auto* init_resp = response->mutable_init_response();
init_resp->set_session_id(session_id_);
init_resp->set_platform_name(AsProtoStringData(client_->platform_name()));
init_resp->set_platform_version(
AsProtoStringData(client_->platform_version()));
init_resp->set_platform_id(client_->platform_id());
init_resp->set_runtime_type(AsProtoStringData(client_->runtime_type()));
init_resp->set_process_index(client_->process_index());
for (auto* device : client_->devices()) {
InitResponse::Device* d = init_resp->add_devices();
d->set_id(device->Id().value());
d->set_device_kind(AsProtoStringData(device->Kind()));
if (auto default_memory = device->DefaultMemory(); default_memory.ok()) {
d->set_default_memory_id((*default_memory)->Id().value());
}
for (const auto* memory : device->Memories()) {
d->add_memory_ids(memory->Id().value());
}
d->set_debug_string(AsProtoStringData(device->DebugString()));
d->set_to_string(AsProtoStringData(device->ToString()));
if (version_.protocol_version() <= 3) {
for (const auto& [name, attr] : device->Attributes().map()) {
TF_ASSIGN_OR_RETURN(
(*d->mutable_deprecated_attributes())[name],
std::visit(
[&](const auto& attr) { return ToVariantProto(attr.value); },
attr));
}
} else {
*d->mutable_attributes() = device->Attributes().ToProto();
}
}
for (auto* addressable_device : client_->addressable_devices()) {
init_resp->add_addressable_device_ids(addressable_device->Id().value());
}
absl::flat_hash_map<int, xla::ifrt::Memory*> memories;
for (auto* device : client_->devices()) {
for (xla::ifrt::Memory* memory : device->Memories()) {
const auto [it, inserted] =
memories.insert({memory->Id().value(), memory});
if (!inserted && it->second != memory) {
return absl::FailedPreconditionError(absl::StrCat(
"Two memories cannot have the same id: ", memory->ToString(),
" vs. ", it->second->ToString()));
}
}
}
for (const auto& [id, memory] : memories) {
auto* m = init_resp->add_memories();
m->set_id(id);
m->set_memory_space_kind(AsProtoStringData(*memory->Kind().memory_kind()));
for (const auto* device : memory->Devices()) {
m->add_device_ids(device->Id().value());
}
m->set_debug_string(AsProtoStringData(memory->DebugString()));
m->set_to_string(AsProtoStringData(memory->ToString()));
}
return response;
}
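// Future handles are single-use: the entry is erased from `futures_` here,
// and the response is only produced once the underlying Future is ready.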
Future<BackendInterface::Response> IfrtBackend::HandleCheckFutureRequest(
std::unique_ptr<IfrtRequest> request) {
const CheckFutureRequest& check_request = request->check_future_request();
Future<> future;
{
absl::MutexLock lock(&futures_mutex_);
const auto it = futures_.find(check_request.future_handle());
if (it == futures_.end()) {
return Future<Response>(absl::NotFoundError(absl::StrCat(
"Unknown future handle: ", check_request.future_handle())));
}
future = std::move(it->second);
futures_.erase(it);
}
auto promise = Future<BackendInterface::Response>::CreatePromise();
future.OnReady([op_id = request->request_metadata().op_id(), promise,
hold = future](absl::Status status) mutable {
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
auto ifrt_resp = NewIfrtResponse(op_id);
ifrt_resp->mutable_check_future_response();
promise.Set(std::move(ifrt_resp));
});
return Future<BackendInterface::Response>(std::move(promise));
}
Future<BackendInterface::Response> IfrtBackend::HandleCheckValueReadyRequest(
std::unique_ptr<IfrtRequest> request) {
std::vector<tsl::RCReference<xla::ifrt::Value>> values;
values.reserve(request->check_value_ready_request().value_handles_size());
for (const auto& value_handle :
request->check_value_ready_request().value_handles()) {
auto array = GetArray(value_handle);
if (!array.ok()) {
return Future<Response>(array.status());
}
values.push_back(*std::move(array));
}
auto ifrt_response_promise =
Future<BackendInterface::Response>::CreatePromise();
Future<BackendInterface::Response> ifrt_response_future(
ifrt_response_promise);
client_->GetReadyFuture(values).OnReady(
[op_id = request->request_metadata().op_id(),
promise = std::move(ifrt_response_promise)](
absl::Status status) mutable -> void {
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
auto ifrt_response = NewIfrtResponse(op_id);
ifrt_response->mutable_check_value_ready_response();
promise.Set(std::move(ifrt_response));
});
return ifrt_response_future;
}
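// The client uploads the raw bytes to the HostBufferStore ahead of time;
// this handler looks them up under `host_buffer_handle`, removes the store
// entry, and keeps the buffer alive through the on-done callback so that
// kImmutableUntilTransferCompletes semantics hold.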
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleMakeArrayFromHostBufferRequest(
std::unique_ptr<IfrtRequest> request) {
if (!request->has_make_array_from_host_buffer_request()) {
return absl::InternalError(
"MakeArrayFromHostBuffer got an IfrtRequest with no "
"MakeArrayFromHostBufferRequest in it.");
}
auto* make_array_request =
request->mutable_make_array_from_host_buffer_request();
TF_ASSIGN_OR_RETURN(
auto sharding, Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
make_array_request->sharding()));
const auto byte_strides = [&]() -> std::optional<std::vector<int64_t>> {
if (!make_array_request->has_byte_strides()) return std::nullopt;
return FromByteStridesProto(make_array_request->byte_strides());
}();
TF_ASSIGN_OR_RETURN(const auto shape,
Shape::FromProto(make_array_request->shape()));
TF_ASSIGN_OR_RETURN(const auto dtype,
DType::FromProto(make_array_request->dtype()));
const uint64_t host_buffer_handle = make_array_request->host_buffer_handle();
absl::Cleanup cleanup = [&] {
CHECK_OK(host_buffer_store_->Delete(host_buffer_handle));
};
TF_ASSIGN_OR_RETURN(std::shared_ptr<const std::string> host_buffer,
host_buffer_store_->Lookup(host_buffer_handle));
std::move(cleanup).Invoke();
TF_ASSIGN_OR_RETURN(const auto mem_region,
ArrayMemRegion::FromMinimalMemRegion(
*host_buffer, dtype, shape, byte_strides));
TF_ASSIGN_OR_RETURN(
auto array,
client_->MakeArrayFromHostBuffer(
mem_region.zeroth_element(), dtype, std::move(shape),
std::move(byte_strides), std::move(sharding),
xla::ifrt::Client::HostBufferSemantics::
kImmutableUntilTransferCompletes,
[hold = std::move(host_buffer)]() mutable { hold.reset(); }));
uint64_t handle = handle_generator_.New();
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({handle, std::move(array)});
}
std::unique_ptr<IfrtResponse> response =
NewIfrtResponse(request->request_metadata().op_id());
auto* make_array_resp =
response->mutable_make_array_from_host_buffer_response();
make_array_resp->set_array_handle(handle);
return response;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleAssembleArrayFromSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& assemble_request =
request->assemble_array_from_single_device_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle :
assemble_request.single_device_array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(Shape shape, Shape::FromProto(assemble_request.shape()));
TF_ASSIGN_OR_RETURN(
auto sharding, Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
assemble_request.sharding()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
assemble_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(auto array, client_->AssembleArrayFromSingleDeviceArrays(
std::move(shape), std::move(sharding),
absl::MakeSpan(arrays), semantics));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
uint64_t handle = handle_generator_.New();
ifrt_resp->mutable_assemble_array_from_single_device_arrays_response()
->set_array_handle(handle);
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({handle, std::move(array)});
}
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleRemapArraysRequest(std::unique_ptr<IfrtRequest> request) {
const auto& remap_request = request->remap_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle : remap_request.array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(
RemapPlan plan,
RemapPlan::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
remap_request.plan()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
remap_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(
auto out_arrays,
client_->RemapArrays(plan, absl::MakeSpan(arrays), semantics));
int64_t num_arrays = out_arrays.size();
auto response = NewIfrtResponse(request->request_metadata().op_id());
auto* handles =
response->mutable_remap_arrays_response()->mutable_array_handles();
handles->Reserve(num_arrays);
uint64_t* handles_buf = handles->AddNAlreadyReserved(num_arrays);
handle_generator_.BulkNew(absl::MakeSpan(handles_buf, num_arrays));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < num_arrays; ++i) {
arrays_.insert({handles_buf[i], out_arrays[i]});
}
}
return response;
}
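// Copies the array into a newly allocated host buffer; once the asynchronous
// copy succeeds, the buffer is stored under the client-chosen handle so it
// can be fetched over the host-buffer channel.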
Future<BackendInterface::Response> IfrtBackend::HandleCopyToHostBufferRequest(
std::unique_ptr<IfrtRequest> request) {
const CopyToHostBufferRequest& copy_to_host =
request->copy_to_host_buffer_request();
auto array = GetArray(copy_to_host.array_handle());
if (!array.ok()) {
return Future<Response>(array.status());
}
std::optional<int> element_size = (*array)->dtype().byte_size();
if (element_size == std::nullopt) {
return Future<Response>(
absl::InternalError("Array element size is unknown."));
}
int64_t host_buffer_size =
(*array)->shape().num_elements() * element_size.value();
auto host_buffer = std::make_unique<std::string>();
host_buffer->resize(host_buffer_size);
const auto byte_strides = [&]() -> std::optional<std::vector<int64_t>> {
if (!copy_to_host.has_byte_strides()) {
return std::nullopt;
}
return FromByteStridesProto(copy_to_host.byte_strides());
}();
const auto mem_region = ArrayMemRegion::FromMinimalMemRegion(
absl::string_view(*host_buffer), (*array)->dtype(), (*array)->shape(),
byte_strides);
if (!mem_region.ok()) {
return Future<Response>(mem_region.status());
}
Future<> copy_status =
(*array)->CopyToHostBuffer(mem_region->zeroth_element(), byte_strides,
ArrayCopySemantics::kAlwaysCopy);
auto resp_promise = Future<BackendInterface::Response>::CreatePromise();
Future<BackendInterface::Response> resp_future(resp_promise);
auto on_ready = [this, op_id = request->request_metadata().op_id(),
host_buffer = std::move(host_buffer),
host_buffer_handle = copy_to_host.host_buffer_handle()](
absl::Status status) mutable
-> absl::StatusOr<std::unique_ptr<IfrtResponse>> {
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(
host_buffer_store_->Store(host_buffer_handle, *std::move(host_buffer)));
std::unique_ptr<IfrtResponse> response = NewIfrtResponse(op_id);
response->mutable_copy_to_host_buffer_response();
return response;
};
copy_status.OnReady(
[promise = std::move(resp_promise), on_ready = std::move(on_ready)](
absl::Status status) mutable { promise.Set(on_ready(status)); });
return resp_future;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleDisassembleIntoSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request) {
TF_ASSIGN_OR_RETURN(
auto array,
GetArray(request->disassemble_into_single_device_arrays_request()
.array_handle()));
TF_ASSIGN_OR_RETURN(auto single_device_arrays,
array->DisassembleIntoSingleDeviceArrays(
xla::ifrt::ArrayCopySemantics::kAlwaysCopy));
int64_t num_arrays = single_device_arrays.size();
auto response = NewIfrtResponse(request->request_metadata().op_id());
auto* handles =
response->mutable_disassemble_into_single_device_arrays_response()
->mutable_single_device_array_handles();
handles->Reserve(num_arrays);
uint64_t* handles_buf = handles->AddNAlreadyReserved(num_arrays);
handle_generator_.BulkNew(absl::MakeSpan(handles_buf, num_arrays));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < num_arrays; ++i) {
arrays_.insert({handles_buf[i], single_device_arrays[i]});
}
}
return response;
}
absl::StatusOr<BackendInterface::Response> IfrtBackend::HandleCopyArraysRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& copy_arrays_request = request->copy_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
arrays.reserve(copy_arrays_request.array_handles_size());
for (const auto& handle : copy_arrays_request.array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArray(handle));
}
std::optional<tsl::RCReference<DeviceList>> devices;
if (!copy_arrays_request.device_ids().empty()) {
BasicDeviceList::Devices ds;
for (const auto& device_id : copy_arrays_request.device_ids()) {
TF_ASSIGN_OR_RETURN(ds.emplace_back(),
client_->LookupDevice(DeviceId(device_id)));
}
devices.emplace(BasicDeviceList::Create(std::move(ds)));
}
std::optional<MemoryKind> memory_kind;
if (copy_arrays_request.has_memory_kind()) {
if (const absl::string_view m = copy_arrays_request.memory_kind();
!m.empty()) {
memory_kind.emplace(MemoryKind(m));
} else {
memory_kind.emplace(MemoryKind());
}
}
TF_ASSIGN_OR_RETURN(
auto semantics,
FromArrayCopySemanticsProto(copy_arrays_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(
auto new_arrays,
client_->CopyArrays(absl::MakeSpan(arrays), std::move(devices),
memory_kind, semantics));
std::unique_ptr<IfrtResponse> ifrt_resp =
NewIfrtResponse(request->request_metadata().op_id());
auto* const copy_arrays_resp = ifrt_resp->mutable_copy_arrays_response();
std::vector<uint64_t> new_handles(new_arrays.size());
handle_generator_.BulkNew(absl::MakeSpan(new_handles));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < new_arrays.size(); ++i) {
arrays_.insert({new_handles[i], new_arrays[i]});
copy_arrays_resp->add_array_handles(new_handles[i]);
}
}
return ifrt_resp;
}
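// True resharding is not supported over the proxy: only target shardings
// with identical partitioning are accepted, and the operation is implemented
// as a CopyArrays() onto the target devices and memory kind.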
absl::StatusOr<BackendInterface::Response> IfrtBackend::HandleReshardRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& reshard_request = request->reshard_request();
TF_ASSIGN_OR_RETURN(auto array, GetArray(reshard_request.array_handle()));
TF_ASSIGN_OR_RETURN(
std::shared_ptr<const Sharding> sharding,
Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
reshard_request.sharding()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
reshard_request.copy_semantics()));
if (!array->sharding().HasSamePartitioning(*sharding)) {
return absl::InvalidArgumentError(absl::StrCat(
"IFRT Proxy does not support resharding, but got ",
array->sharding().DebugString(), " as the original sharding and ",
sharding->DebugString(), " as the target sharding"));
}
TF_ASSIGN_OR_RETURN(
auto copied_arrays,
client_->CopyArrays(absl::MakeSpan(&array, 1), sharding->devices(),
sharding->memory_kind(), semantics));
uint64_t resharded_array_handle = handle_generator_.New();
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({resharded_array_handle, std::move(copied_arrays[0])});
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_reshard_response()->set_array_handle(
resharded_array_handle);
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleFullyReplicatedShardRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& fully_replicated_shard_request =
request->fully_replicated_shard_request();
TF_ASSIGN_OR_RETURN(auto array,
GetArray(fully_replicated_shard_request.array_handle()));
TF_ASSIGN_OR_RETURN(auto semantics,
FromArrayCopySemanticsProto(
fully_replicated_shard_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(auto new_array, array->FullyReplicatedShard(semantics));
uint64_t new_array_handle = handle_generator_.New();
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({new_array_handle, std::move(new_array)});
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_fully_replicated_shard_response()->set_array_handle(
new_array_handle);
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleDeleteArrayRequest(std::unique_ptr<IfrtRequest> request) {
std::vector<Future<>> deletion_futures;
auto delete_handle = [&](uint64_t handle) {
auto array = GetArray(handle);
if (array.ok()) {
deletion_futures.push_back(array.value()->Delete());
} else {
deletion_futures.push_back(Future<>(array.status()));
}
};
if (request->delete_array_request().has_array_handle_deprecated()) {
delete_handle(request->delete_array_request().array_handle_deprecated());
}
for (auto array_handle : request->delete_array_request().array_handle()) {
delete_handle(array_handle);
}
uint64_t future_handle = handle_generator_.New();
{
absl::MutexLock lock(&futures_mutex_);
futures_.insert({future_handle, JoinFutures(deletion_futures)});
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_delete_array_response()->set_deletion_future_handle(
future_handle);
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleIsArrayDeletedRequest(std::unique_ptr<IfrtRequest> request) {
TF_ASSIGN_OR_RETURN(
auto array, GetArray(request->is_array_deleted_request().array_handle()));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_is_array_deleted_response()->set_deleted(
array->IsDeleted());
return ifrt_resp;
}
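// Drops the server-side references to the given arrays. Unlike deletion, this
// is pure bookkeeping: handles are erased from the arrays_ map, and unknown
// handles fail the request with NotFound.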
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleDestructArrayRequest(std::unique_ptr<IfrtRequest> request) {
std::vector<uint64_t> bad_handles;
{
absl::MutexLock lock(&arrays_mutex_);
for (const uint64_t array_handle :
request->destruct_array_request().array_handle()) {
if (!arrays_.erase(array_handle)) {
bad_handles.push_back(array_handle);
}
}
if (request->destruct_array_request().has_array_handle_deprecated()) {
const uint64_t array_handle =
request->destruct_array_request().array_handle_deprecated();
if (!arrays_.erase(array_handle)) {
bad_handles.push_back(array_handle);
}
}
}
if (!bad_handles.empty()) {
return absl::NotFoundError(absl::StrCat("Unknown array handle(s): ",
absl::StrJoin(bad_handles, ",")));
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_destruct_array_response();
return ifrt_resp;
}
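// Compiles a program asynchronously on the dedicated compile thread pool so
// that long-running compilations do not block request processing. The request
// is moved into a shared_ptr because the deferred lambda must be copyable.
// Host callbacks are deserialized into RemoteLoadedHostCallback queues and are
// supported only for XLA-like IFRT implementations.
//
// Roughly, the exchange with the client looks like (field names as used
// below; see ifrt_service.proto for the authoritative schema):
//   CompileRequest  { program, compile_options, host_callbacks[] }
//   CompileResponse { loaded_executable_handle, ready_future_handle,
//                     loaded_host_callback_handles[], fingerprint, ... }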
Future<BackendInterface::Response> IfrtBackend::HandleCompileRequest(
std::unique_ptr<IfrtRequest> request) {
auto f = [this, request = std::shared_ptr<IfrtRequest>(
std::move(request))]() -> absl::StatusOr<Response> {
const CompileRequest& compile_request = request->compile_request();
auto deserialize_program_options =
std::make_unique<DeserializeProgramOptions>(
absl::bind_front(&Client::LookupDevice, client_.get()));
TF_ASSIGN_OR_RETURN(
auto program,
Deserialize<xla::ifrt::Program>(
compile_request.program(), std::move(deserialize_program_options)));
TF_ASSIGN_OR_RETURN(auto options, Deserialize<xla::ifrt::CompileOptions>(
compile_request.compile_options(),
nullptr));
std::vector<std::shared_ptr<RemoteLoadedHostCallbackQueue>>
host_callback_queues;
{
std::vector<tsl::RCReference<xla::ifrt::LoadedHostCallback>>
loaded_host_callbacks;
for (int i = 0; i < compile_request.host_callbacks_size(); ++i) {
host_callback_queues.emplace_back(
std::make_shared<RemoteLoadedHostCallbackQueue>());
TF_ASSIGN_OR_RETURN(
loaded_host_callbacks.emplace_back(),
RemoteLoadedHostCallback::CreateFromSerialized(
client_.get(), compile_request.host_callbacks(i),
host_callback_queues.back()));
}
if (!loaded_host_callbacks.empty()) {
if (auto xla_options =
llvm::dyn_cast<xla::ifrt::XlaCompileOptions>(options.get())) {
xla_options->loaded_host_callbacks = std::move(loaded_host_callbacks);
} else {
return absl::UnimplementedError(
"Host callbacks are supported only for XLA-like IFRT "
"implementations using `xla::ifrt::XlaCompileOptions`");
}
}
}
TF_ASSIGN_OR_RETURN(auto executable,
client_->GetDefaultCompiler()->Compile(
std::move(program), std::move(options)));
std::unique_ptr<IfrtResponse> ifrt_resp =
NewIfrtResponse(request->request_metadata().op_id());
auto* compile_resp = ifrt_resp->mutable_compile_response();
uint64_t handle = handle_generator_.New();
compile_resp->set_loaded_executable_handle(handle);
std::vector<uint64_t> host_callback_handles(host_callback_queues.size());
handle_generator_.BulkNew(absl::MakeSpan(host_callback_handles));
compile_resp->mutable_loaded_host_callback_handles()->Add(
host_callback_handles.begin(), host_callback_handles.end());
compile_resp->set_name(AsProtoStringData(executable->name()));
compile_resp->set_num_devices(executable->num_devices());
for (const auto* device : executable->addressable_devices()) {
compile_resp->add_addressable_device_ids(device->Id().value());
}
auto fingerprint = executable->Fingerprint();
if (!fingerprint.ok()) {
*compile_resp->mutable_fingerprint_error() =
tsl::StatusToProto(fingerprint.status());
} else if (fingerprint->has_value()) {
compile_resp->set_fingerprint_value(std::move(fingerprint)->value());
}
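    // Register the ready future, the executable itself, and any host callback
    // queues under their respective locks so that later requests can address
    // them by handle.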
{
absl::MutexLock lock(&futures_mutex_);
compile_resp->set_ready_future_handle(handle_generator_.New());
futures_.insert(
{compile_resp->ready_future_handle(), executable->GetReadyFuture()});
}
{
absl::MutexLock lock(&executables_mutex_);
executables_.insert({handle, std::move(executable)});
}
{
absl::MutexLock lock(&host_callback_queues_mutex_);
for (int i = 0; i < host_callback_queues.size(); ++i) {
host_callback_queues_.insert(
{host_callback_handles[i], std::move(host_callback_queues[i])});
}
}
return ifrt_resp;
};
return AsyncExecute(std::move(f), &compile_thread_pool_);
}
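// Collects executable metadata (shardings, layouts, and output memory kinds)
// on a background thread. Layout queries are best-effort: only
// xla::PjRtXlaLayout can be represented in the proxy protocol, and failures
// are embedded as status protos rather than failing the whole response.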
Future<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableMetadataRequest(
std::unique_ptr<IfrtRequest> request) {
return AsyncExecute([this, request = std::shared_ptr<IfrtRequest>(std::move(
request))]() -> absl::StatusOr<Response> {
const uint64_t handle = request->loaded_executable_metadata_request()
.loaded_executable_handle();
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(handle));
std::unique_ptr<IfrtResponse> ifrt_resp =
NewIfrtResponse(request->request_metadata().op_id());
auto* metadata_resp =
ifrt_resp->mutable_loaded_executable_metadata_response();
if (auto parameter_shardings = executable->GetParameterShardings();
parameter_shardings.has_value()) {
metadata_resp->mutable_parameter_shardings()->mutable_shardings()->Add(
parameter_shardings->begin(), parameter_shardings->end());
}
if (auto output_shardings = executable->GetOutputShardings();
output_shardings.has_value()) {
metadata_resp->mutable_output_shardings()->mutable_shardings()->Add(
output_shardings->begin(), output_shardings->end());
}
if (auto parameter_layouts = executable->GetParameterLayouts();
parameter_layouts.ok()) {
auto* const layouts =
metadata_resp->mutable_parameter_layouts_list()->mutable_layouts();
for (const std::unique_ptr<xla::PjRtLayout>& parameter_layout :
*parameter_layouts) {
const xla::PjRtXlaLayout* layout =
dynamic_cast<const xla::PjRtXlaLayout*>(parameter_layout.get());
TF_RET_CHECK(layout != nullptr)
<< "IFRT proxy only supports PjRtXlaLayout, got a different "
"subclass";
layouts->Add(layout->xla_layout().ToProto());
}
} else {
*metadata_resp->mutable_parameter_layouts_error() =
tsl::StatusToProto(parameter_layouts.status());
}
if (auto output_layouts = executable->GetOutputLayouts();
output_layouts.ok()) {
auto* const layouts =
metadata_resp->mutable_output_layouts_list()->mutable_layouts();
for (const std::unique_ptr<xla::PjRtLayout>& output_layout :
*output_layouts) {
const xla::PjRtXlaLayout* layout =
dynamic_cast<const xla::PjRtXlaLayout*>(output_layout.get());
TF_RET_CHECK(layout != nullptr)
<< "IFRT proxy only supports PjRtXlaLayout, got a different "
"subclass";
layouts->Add(layout->xla_layout().ToProto());
}
} else {
*metadata_resp->mutable_output_layouts_error() =
tsl::StatusToProto(output_layouts.status());
}
auto output_memory_kinds = executable->GetOutputMemoryKinds();
if (output_memory_kinds.ok()) {
for (const auto& memory_kinds : *output_memory_kinds) {
auto* const list = metadata_resp->mutable_output_memory_kinds()
->add_memory_kind_lists()
->mutable_memory_kinds();
list->Reserve(memory_kinds.size());
list->Add(memory_kinds.begin(), memory_kinds.end());
}
} else {
*metadata_resp->mutable_output_memory_kinds()->mutable_status() =
tsl::StatusToProto(output_memory_kinds.status());
}
return ifrt_resp;
});
}
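// Executes a loaded executable: argument arrays are resolved under a reader
// lock, output arrays are registered under freshly generated handles, and the
// execution status is optionally exposed through a future handle.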
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableExecuteRequest(
std::unique_ptr<IfrtRequest> request) {
const LoadedExecutableExecuteRequest& execute =
request->loaded_executable_execute_request();
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(execute.loaded_executable_handle()));
std::vector<tsl::RCReference<xla::ifrt::Array>> args;
args.reserve(execute.args_handles_size());
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle : execute.args_handles()) {
TF_ASSIGN_OR_RETURN(args.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(auto execute_options,
xla::ifrt::LoadedExecutable::ExecuteOptions::FromProto(
execute.execute_options()));
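  // Clients speaking protocol versions before 6 always expect a status future,
  // so fill_status is forced on their behalf.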
if (version_.protocol_version() < 6) {
execute_options.fill_status = true;
}
std::optional<tsl::RCReference<DeviceList>> devices;
if (!execute.device_ids().empty()) {
BasicDeviceList::Devices d;
d.reserve(execute.device_ids_size());
for (const int32_t device_id : execute.device_ids()) {
TF_ASSIGN_OR_RETURN(d.emplace_back(),
client_->LookupDevice(DeviceId(device_id)));
}
devices = BasicDeviceList::Create(std::move(d));
}
TF_ASSIGN_OR_RETURN(
xla::ifrt::LoadedExecutable::ExecuteResult result,
executable->Execute(absl::MakeSpan(args), execute_options, devices));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
LoadedExecutableExecuteResponse* execute_response =
ifrt_resp->mutable_loaded_executable_execute_response();
if (version_.protocol_version() < 6 || execute_options.fill_status) {
absl::MutexLock lock(&futures_mutex_);
execute_response->set_status_handle(handle_generator_.New());
futures_.insert(
{execute_response->status_handle(), std::move(result.status)});
}
std::vector<uint64_t> output_handles(result.outputs.size());
handle_generator_.BulkNew(absl::MakeSpan(output_handles));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < result.outputs.size(); ++i) {
tsl::RCReference<xla::ifrt::Array>& array = result.outputs[i];
LoadedExecutableExecuteResponse::Output* output =
execute_response->add_outputs();
*output->mutable_dtype() = array->dtype().ToProto();
*output->mutable_shape() = array->shape().ToProto();
TF_ASSIGN_OR_RETURN(*output->mutable_sharding(),
array->sharding().ToProto());
output->set_array_handle(output_handles[i]);
arrays_.insert({output_handles[i], std::move(array)});
}
}
return ifrt_resp;
}
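// Initiates deletion of a loaded executable and returns a future handle that
// resolves once the deletion completes.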
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableDeleteRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& del = request->loaded_executable_delete_request();
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(del.loaded_executable_handle()));
Future<> future = executable->Delete();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
auto* del_response = ifrt_resp->mutable_loaded_executable_delete_response();
{
absl::MutexLock lock(&futures_mutex_);
del_response->set_future_handle(handle_generator_.New());
futures_.insert({del_response->future_handle(), std::move(future)});
}
return ifrt_resp;
}
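// Synchronously reports whether a loaded executable has been deleted.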
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableIsDeletedRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& is_deleted = request->loaded_executable_is_deleted_request();
TF_ASSIGN_OR_RETURN(
std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(is_deleted.loaded_executable_handle()));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
auto* is_deleted_response =
ifrt_resp->mutable_loaded_executable_is_deleted_response();
is_deleted_response->set_is_deleted(executable->IsDeleted());
return ifrt_resp;
}
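// Drops the server-side reference to a loaded executable. The executable may
// outlive this call if in-flight executions still hold shared references.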
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableDestructRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& destruct = request->loaded_executable_destruct_request();
std::shared_ptr<xla::ifrt::LoadedExecutable> executable;
{
absl::MutexLock lock(&executables_mutex_);
const auto it = executables_.find(destruct.loaded_executable_handle());
if (it == executables_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown loaded executable handle: ",
destruct.loaded_executable_handle()));
}
executable = std::move(it->second);
executables_.erase(it);
}
executable.reset();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_loaded_executable_destruct_response();
return ifrt_resp;
}
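// Waits (on a background thread) for the next queued execution of the given
// host callback, stages its operands in the host buffer store for the client
// to fetch, and returns an execution handle. An empty queue pop indicates the
// queue was closed, in which case the handle is dropped and an empty response
// is returned.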
Future<BackendInterface::Response>
IfrtBackend::HandleLoadedHostCallbackPollRequest(
std::unique_ptr<IfrtRequest> request) {
return AsyncExecute([this, request = std::shared_ptr<IfrtRequest>(std::move(
request))]() -> absl::StatusOr<Response> {
const auto& poll = request->loaded_host_callback_poll_request();
const uint64_t handle = poll.loaded_host_callback_handle();
std::shared_ptr<RemoteLoadedHostCallbackQueue> queue;
{
absl::MutexLock lock(&host_callback_queues_mutex_);
auto it = host_callback_queues_.find(handle);
if (it == host_callback_queues_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown loaded host callback handle: ", handle));
}
queue = it->second;
}
auto execution_request = queue->Pop();
if (!execution_request.has_value()) {
{
absl::MutexLock lock(&host_callback_queues_mutex_);
host_callback_queues_.erase(handle);
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_loaded_host_callback_poll_response();
return ifrt_resp;
}
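    // From this point on, any failure to hand the execution off must be
    // reflected on the execution's status future; the cleanup is cancelled
    // only once the execution has been recorded.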
absl::Cleanup cleanup = [&] {
std::move(execution_request)
->status.Set(absl::UnknownError(
"Unable to enqueue the host callback execution"));
};
{
std::string buffer;
for (const auto& operand : execution_request->operands) {
buffer.append(static_cast<const char*>(operand.data), operand.size);
}
TF_RETURN_IF_ERROR(host_buffer_store_->Store(
poll.operand_host_buffer_handle(), std::move(buffer)));
}
const uint64_t execution_handle = handle_generator_.New();
{
absl::MutexLock lock(&host_callback_executions_mutex_);
host_callback_executions_.insert(
{execution_handle, *std::move(execution_request)});
}
std::move(cleanup).Cancel();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
auto* poll_response =
ifrt_resp->mutable_loaded_host_callback_poll_response();
poll_response->set_host_callback_execution_handle(execution_handle);
return ifrt_resp;
});
}
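// Consumes the results of a host callback execution previously handed out by a
// poll request. Results arrive either as a host buffer that must exactly cover
// all declared result shapes, or as an error status; in both cases the outcome
// is propagated to the execution's status future.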
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedHostCallbackReturnRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& ret = request->loaded_host_callback_return_request();
RemoteLoadedHostCallbackQueue::ExecutionRequest execution_request;
{
absl::MutexLock lock(&host_callback_executions_mutex_);
const auto it =
host_callback_executions_.find(ret.host_callback_execution_handle());
if (it == host_callback_executions_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown host callback execution: ",
ret.host_callback_execution_handle()));
}
execution_request = std::move(it->second);
host_callback_executions_.erase(it);
}
absl::Cleanup cleanup = [&] {
std::move(execution_request)
.status.Set(absl::UnknownError(
"Unable to process the host callback execution results"));
};
absl::Status status;
if (ret.has_result_host_buffer_handle()) {
TF_ASSIGN_OR_RETURN(
std::shared_ptr<const std::string> buffer,
host_buffer_store_->Lookup(ret.result_host_buffer_handle()));
    absl::Cleanup buffer_cleanup = [&] {
      CHECK_OK(host_buffer_store_->Delete(ret.result_host_buffer_handle()));
    };
int64_t offset = 0;
for (const auto& result : execution_request.results) {
if (offset + result.size > buffer->size()) {
return absl::InternalError(
absl::StrCat("Buffer overflow while reading host callback "
"execution results; ",
"range: [", offset, ", ", offset + result.size, "), ",
"buffer size: ", buffer->size()));
}
std::memcpy(result.data, buffer->data() + offset, result.size);
offset += result.size;
}
if (offset != buffer->size()) {
return absl::InternalError(
absl::StrCat("Host callback execution did not consume the entire "
"result buffer; size: ",
buffer->size(), "; consumed: ", offset));
}
} else {
status = tsl::StatusFromProto(ret.error());
}
std::move(execution_request).status.Set(std::move(status));
std::move(cleanup).Cancel();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_loaded_host_callback_return_response();
return ifrt_resp;
}
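// Returns the client's default device assignment for the requested replica and
// partition counts, serialized into the response proto.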
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleGetDefaultDeviceAssignmentRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& get_default_device_assignment_request =
request->get_default_device_assignment_request();
TF_ASSIGN_OR_RETURN(
auto assignment,
client_->GetDefaultDeviceAssignment(
get_default_device_assignment_request.num_replicas(),
get_default_device_assignment_request.num_partitions()));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
assignment.Serialize(
ifrt_resp->mutable_get_default_device_assignment_response()
->mutable_device_assignment());
return ifrt_resp;
}
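// Looks up a loaded executable by handle; unknown handles yield NotFound.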
absl::StatusOr<std::shared_ptr<xla::ifrt::LoadedExecutable>>
IfrtBackend::GetLoadedExecutable(uint64_t handle) {
absl::MutexLock lock(&executables_mutex_);
auto it = executables_.find(handle);
if (it == executables_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown loaded executable handle: ", handle));
}
return it->second;
}
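// Array lookups come in two flavors: GetArray takes the reader lock itself,
// while GetArrayLocked assumes the caller already holds arrays_mutex_, e.g.
// when resolving many handles inside a single critical section.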
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> IfrtBackend::GetArray(
uint64_t array_handle) {
absl::ReaderMutexLock lock(&arrays_mutex_);
return GetArrayLocked(array_handle);
}
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> IfrtBackend::GetArrayLocked(
uint64_t array_handle) {
auto it = arrays_.find(array_handle);
if (it == arrays_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown array handle: ", array_handle));
}
return it->second;
}
}
}
} | #include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include <sys/types.h>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/mock.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/host_callback.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::_;
using ::testing::ByMove;
using ::testing::DoAll;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::Invoke;
using ::testing::Not;
using ::testing::NotNull;
using ::testing::Optional;
using ::testing::Pointee;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
#if defined(PLATFORM_GOOGLE)
using ::testing::EquivToProto;
using ::testing::proto::IgnoringRepeatedFieldOrdering;
using ::testing::proto::Partially;
#endif
constexpr uint64_t kSessionId = 12345;
class IfrtBackendTest
: public ::testing::TestWithParam<int> {
protected:
IfrtProxyVersion Version() {
IfrtProxyVersion version;
version.set_protocol_version(GetParam());
return version;
}
};
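// Builds an IfrtRequest with only the op_id metadata populated; individual
// tests fill in the payload they exercise.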
std::unique_ptr<IfrtRequest> NewIfrtRequest(uint64_t op_id) {
auto ifrt_request = std::make_unique<IfrtRequest>();
auto* request_metadata = ifrt_request->mutable_request_metadata();
request_metadata->set_op_id(op_id);
return ifrt_request;
}
TEST_P(IfrtBackendTest, CreationFailsWithNullIfrtClient) {
EXPECT_THAT(IfrtBackend::Create(Version(), kSessionId, nullptr, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_P(IfrtBackendTest, SuccessfulCreation) {
auto ifrt_client = std::make_unique<MockClient>();
ASSERT_THAT(IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()),
IsOk());
}
TEST_P(IfrtBackendTest, ShutdownSucceeds) {
auto ifrt_client = std::make_unique<MockClient>();
TF_ASSERT_OK_AND_ASSIGN(
auto ifrt_backend,
IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()));
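  // Destruction of `ifrt_backend` at scope exit exercises the shutdown path;
  // the test passes if no crash or hang occurs.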
}
TEST_P(IfrtBackendTest, ProcessFailsWithNoRequestSet) {
auto ifrt_client = std::make_unique<MockClient>();
TF_ASSERT_OK_AND_ASSIGN(
auto ifrt_backend,
IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()));
auto request = std::make_unique<IfrtRequest>();
auto process_status = ifrt_backend->Process(std::move(request)).Await();
ASSERT_THAT(process_status, Not(IsOk()));
}
INSTANTIATE_TEST_SUITE_P(
IfrtBackendTestWithAllVersions, IfrtBackendTest,
testing::Range(kServerMinVersion, kServerMaxVersion + 1),
[](const testing::TestParamInfo<IfrtBackendTest::ParamType>& info) {
return absl::StrCat(info.param);
});
struct TestProgram : llvm::RTTIExtends<TestProgram, Program> {
static char ID;
};
[[maybe_unused]] char TestProgram::ID = 0;
class TestProgramSerDes : public llvm::RTTIExtends<TestProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::proxy::TestProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
CHECK(llvm::isa<TestProgram>(serializable));
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
return std::make_unique<TestProgram>();
}
static char ID;
};
[[maybe_unused]] char TestProgramSerDes::ID = 0;
struct TestCompileOptions
: llvm::RTTIExtends<TestCompileOptions, CompileOptions> {
static char ID;
};
[[maybe_unused]] char TestCompileOptions::ID = 0;
class TestCompileOptionsSerDes
: public llvm::RTTIExtends<TestCompileOptionsSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::proxy::TestCompileOptions";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
CHECK(llvm::isa<TestCompileOptions>(serializable));
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
return std::make_unique<TestCompileOptions>();
}
static char ID;
};
[[maybe_unused]] char TestCompileOptionsSerDes::ID = 0;
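// Fixture that wires a MockClient with two mock devices and a fresh
// HostBufferStore into an IfrtBackend, and provides helpers for minting test
// arrays and compiling stub executables through the backend's request
// interface.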
class IfrtBackendHandlerTest : public IfrtBackendTest {
protected:
static void SetUpTestSuite() {
RegisterSerDes<TestProgram>(std::make_unique<TestProgramSerDes>());
RegisterSerDes<TestCompileOptions>(
std::make_unique<TestCompileOptionsSerDes>());
}
void SetUp() override {
auto mock_client = std::make_unique<xla::ifrt::MockClient>();
std::vector<xla::ifrt::Device*> raw_device_ptrs;
for (int i = 0; i < 2; ++i) {
auto mock_device = std::make_unique<xla::ifrt::MockDevice>();
ON_CALL(*mock_device, Id()).WillByDefault(Return(DeviceId(i)));
raw_device_ptrs.push_back(mock_device.get());
mock_devices_.push_back(std::move(mock_device));
}
ON_CALL(*mock_client, devices()).WillByDefault(Return(raw_device_ptrs));
ON_CALL(*mock_client, LookupDevice(_))
.WillByDefault(
Invoke([this](DeviceId id) -> absl::StatusOr<xla::ifrt::Device*> {
if (id.value() < 0 || id.value() >= mock_devices_.size()) {
return absl::NotFoundError(
absl::StrCat("Unknown device id: ", id.value()));
}
return mock_devices_[id.value()].get();
}));
mock_client_ = mock_client.get();
EXPECT_CALL(*mock_client_, GetDefaultCompiler)
.WillRepeatedly(Return(&mock_compiler_));
host_buffer_store_ = std::make_shared<HostBufferStore>();
TF_ASSERT_OK_AND_ASSIGN(
backend_,
IfrtBackend::Create(Version(), kSessionId, std::move(mock_client),
host_buffer_store_));
}
absl::StatusOr<std::shared_ptr<IfrtResponse>> CallBackend(
std::unique_ptr<IfrtRequest> request) {
auto response_future = backend_->Process(std::move(request));
return std::move(response_future).Await();
}
uint64_t NewOpId() {
absl::MutexLock lock(&mu_);
return current_op_id_++;
}
uint64_t NewHostBufferHandle() { return current_host_buffer_handle_++; }
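  // Stages a small host buffer and issues a MakeArrayFromHostBufferRequest so
  // that `mock_array` becomes addressable through a backend array handle.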
absl::StatusOr<uint64_t> MakeTestArray(tsl::RCReference<Array> mock_array) {
EXPECT_CALL(*mock_client_, MakeArrayFromHostBuffer(_, _, _, _, _, _, _))
.WillOnce(Return(std::move(mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
{
const uint64_t host_buffer_handle = NewHostBufferHandle();
TF_RETURN_IF_ERROR(
host_buffer_store_->Store(host_buffer_handle, "01234567"));
auto* make_array =
ifrt_request->mutable_make_array_from_host_buffer_request();
make_array->mutable_dtype()->set_kind(DTypeProto::KIND_S32);
make_array->mutable_shape()->add_dims(2);
make_array->set_host_buffer_handle(host_buffer_handle);
TF_ASSIGN_OR_RETURN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSIGN_OR_RETURN(
*make_array->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
TF_ASSIGN_OR_RETURN(auto make_array_response,
CallBackend(std::move(ifrt_request)));
TF_RETURN_IF_ERROR(tsl::StatusFromProto(
make_array_response->response_metadata().status()));
return make_array_response->make_array_from_host_buffer_response()
.array_handle();
}
absl::StatusOr<CompileResponse> CompileTestLoadedExecutable(
absl::StatusOr<std::unique_ptr<LoadedExecutable>> loaded_executable) {
auto request = NewIfrtRequest(NewOpId());
CompileRequest* compile_request = request->mutable_compile_request();
TestProgram program;
TF_ASSIGN_OR_RETURN(*compile_request->mutable_program(),
Serialize(program));
TestCompileOptions compile_options;
TF_ASSIGN_OR_RETURN(*compile_request->mutable_compile_options(),
Serialize(compile_options));
EXPECT_CALL(mock_compiler_, Compile(_, _))
.WillOnce(Return(ByMove(std::move(loaded_executable))));
TF_ASSIGN_OR_RETURN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
TF_RET_CHECK(response->has_compile_response());
return response->compile_response();
}
absl::Status CheckFuture(uint64_t handle) {
if (handle == 0) {
return absl::InternalError("Test error, future handle is 0");
}
auto request = NewIfrtRequest(NewOpId());
request->mutable_check_future_request()->set_future_handle(handle);
TF_ASSIGN_OR_RETURN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
return tsl::StatusFromProto(response->response_metadata().status());
}
xla::ifrt::MockClient* mock_client_;
xla::ifrt::MockCompiler mock_compiler_;
std::vector<std::unique_ptr<xla::ifrt::MockDevice>> mock_devices_;
std::shared_ptr<HostBufferStore> host_buffer_store_;
private:
absl::Mutex mu_;
uint64_t current_op_id_ ABSL_GUARDED_BY(mu_) = 1;
uint64_t current_host_buffer_handle_ = 1;
std::unique_ptr<IfrtBackend> backend_;
};
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, Init) {
EXPECT_CALL(*mock_client_, platform_name())
.WillRepeatedly(Return("ifrt_backend"));
EXPECT_CALL(*mock_client_, platform_version()).WillRepeatedly(Return("n/a"));
EXPECT_CALL(*mock_client_, platform_id()).WillRepeatedly(Return(42));
EXPECT_CALL(*mock_client_, process_index()).WillRepeatedly(Return(1));
EXPECT_CALL(*mock_client_, runtime_type())
.WillRepeatedly(Return("ifrt-service"));
std::vector<std::vector<xla::ifrt::Device*>> mock_memory_devices;
mock_memory_devices.reserve(mock_devices_.size());
for (const auto& mock_device : mock_devices_) {
mock_memory_devices.push_back({mock_device.get()});
}
std::vector<MockMemory> mock_memories(mock_devices_.size());
MemoryKind kind("mock");
for (int i = 0; i < mock_memories.size(); ++i) {
MockMemory& memory = mock_memories[i];
EXPECT_CALL(memory, Devices())
.WillRepeatedly(Return(mock_memory_devices[i]));
EXPECT_CALL(memory, Id()).WillRepeatedly(Return(MemoryId(i)));
EXPECT_CALL(memory, Kind()).WillRepeatedly(ReturnRef(kind));
}
std::vector<std::vector<Memory*>> device_memories;
device_memories.reserve(mock_devices_.size());
for (int i = 0; i < mock_devices_.size(); ++i) {
device_memories.push_back({&mock_memories[i]});
}
std::vector<AttributeMap> device_attributes;
device_attributes.reserve(mock_devices_.size());
for (int i = 0; i < mock_devices_.size(); ++i) {
AttributeMap::Map map;
map.insert({"name", AttributeMap::StringValue(absl::StrCat("device", i))});
device_attributes.push_back(AttributeMap(std::move(map)));
MockDevice& mock_device = *mock_devices_[i];
EXPECT_CALL(mock_device, Kind()).WillRepeatedly(Return("mock"));
EXPECT_CALL(mock_device, Memories())
.WillRepeatedly(Return(device_memories[i]));
EXPECT_CALL(mock_device, DefaultMemory())
.WillRepeatedly(Return(&mock_memories[i]));
EXPECT_CALL(mock_device, Attributes())
.WillRepeatedly(ReturnRef(device_attributes[i]));
}
auto request = NewIfrtRequest(NewOpId());
request->mutable_init_request();
if (Version().protocol_version() <= 3) {
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(
Partially(IgnoringRepeatedFieldOrdering(EquivToProto(R"pb(
init_response {
session_id: 12345
platform_name: "ifrt_backend"
platform_version: "n/a"
platform_id: 42
process_index: 1
runtime_type: "ifrt-service"
devices {
id: 0
device_kind: "mock"
default_memory_id: 0
memory_ids: [ 0 ]
deprecated_attributes {
key: "name"
value { string_value: "device0" }
}
}
devices {
id: 1
device_kind: "mock"
default_memory_id: 1
memory_ids: [ 1 ]
deprecated_attributes {
key: "name"
value { string_value: "device1" }
}
}
memories {
id: 0
memory_space_kind: "mock"
device_ids: [ 0 ]
}
memories {
id: 1
memory_space_kind: "mock"
device_ids: [ 1 ]
}
}
)pb"))))));
} else {
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(
Partially(IgnoringRepeatedFieldOrdering(EquivToProto(R"pb(
init_response {
session_id: 12345
platform_name: "ifrt_backend"
platform_version: "n/a"
platform_id: 42
process_index: 1
runtime_type: "ifrt-service"
devices {
id: 0
device_kind: "mock"
default_memory_id: 0
memory_ids: [ 0 ]
attributes {
attributes {
key: "name"
value { string_value: "device0" }
}
}
}
devices {
id: 1
device_kind: "mock"
default_memory_id: 1
memory_ids: [ 1 ]
attributes {
attributes {
key: "name"
value { string_value: "device1" }
}
}
}
memories {
id: 0
memory_space_kind: "mock"
device_ids: [ 0 ]
}
memories {
id: 1
memory_space_kind: "mock"
device_ids: [ 1 ]
}
}
)pb"))))));
}
}
#endif
TEST_P(IfrtBackendHandlerTest, DisassembleIntoSingleDeviceArraysSucceeds) {
std::vector<tsl::RCReference<xla::ifrt::Array>> single_device_arrays;
single_device_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
single_device_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
tsl::RCReference<xla::ifrt::MockArray> source_mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*source_mock_array, DisassembleIntoSingleDeviceArrays(_))
.WillOnce(Return(std::move(single_device_arrays)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(source_mock_array)));
auto disassemble_request = NewIfrtRequest(NewOpId());
disassemble_request->mutable_disassemble_into_single_device_arrays_request()
->set_array_handle(array_handle);
TF_ASSERT_OK_AND_ASSIGN(auto disassemble_response,
CallBackend(std::move(disassemble_request)));
EXPECT_THAT(
disassemble_response->disassemble_into_single_device_arrays_response()
.single_device_array_handles(),
SizeIs(2));
}
TEST_P(IfrtBackendHandlerTest, MakeArrayFromHostBufferSuccess) {
const uint64_t kHostBufferHandle = 1234;
ASSERT_THAT(
host_buffer_store_->Store(kHostBufferHandle, std::string(480, 'a')),
IsOk());
auto ifrt_request = NewIfrtRequest(NewOpId());
{
auto* make_array =
ifrt_request->mutable_make_array_from_host_buffer_request();
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
dtype { kind: KIND_F64 }
shape { dims: [ 5, 3, 4 ] }
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
make_array));
make_array->set_host_buffer_handle(kHostBufferHandle);
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*make_array->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
const Shape expected_shape({5, 3, 4});
const std::vector<int64_t> expected_byte_strides_vec = {8, 40, 120};
const std::optional<absl::Span<const int64_t>> expected_byte_strides =
absl::Span<const int64_t>(expected_byte_strides_vec);
tsl::RCReference<xla::ifrt::MockArray> mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_client_,
MakeArrayFromHostBuffer(_, DType(DType::kF64), expected_shape,
expected_byte_strides, _, _, _))
.WillOnce(Return(std::move(mock_array)));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->make_array_from_host_buffer_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, AssembleArrayFromSingleDeviceArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
{
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
shape { dims: [ 2, 2 ] }
copy_semantics: ARRAY_COPY_SEMANTICS_ALWAYS_COPY
)pb",
ifrt_request
->mutable_assemble_array_from_single_device_arrays_request()));
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request
->mutable_assemble_array_from_single_device_arrays_request()
->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
std::vector<tsl::RCReference<xla::ifrt::MockArray>> single_device_arrays;
for (int i = 0; i < 2; ++i) {
auto array = tsl::MakeRef<xla::ifrt::MockArray>();
single_device_arrays.push_back(array);
TF_ASSERT_OK_AND_ASSIGN(uint64_t array_handle, MakeTestArray(array));
ifrt_request->mutable_assemble_array_from_single_device_arrays_request()
->add_single_device_array_handles(array_handle);
}
tsl::RCReference<xla::ifrt::MockArray> result =
tsl::MakeRef<xla::ifrt::MockArray>();
const Shape expected_shape({2, 2});
EXPECT_CALL(*mock_client_,
AssembleArrayFromSingleDeviceArrays(
expected_shape, _, ElementsAreArray(single_device_arrays), _))
.WillOnce(Return(std::move(result)));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->assemble_array_from_single_device_arrays_response()
.array_handle(),
0);
}
TEST_P(IfrtBackendHandlerTest, CopyToHostSuccess) {
Shape shape({5, 3, 4});
tsl::RCReference<xla::ifrt::MockArray> array =
tsl::MakeRef<xla::ifrt::MockArray>();
ON_CALL(*array, shape()).WillByDefault(ReturnRef(shape));
ON_CALL(*array, dtype()).WillByDefault(Return(DType(DType::kF64)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle, MakeTestArray(array));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* copy_to_host = ifrt_request->mutable_copy_to_host_buffer_request();
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
copy_to_host));
copy_to_host->set_array_handle(array_handle);
const uint64_t host_buffer_handle = NewHostBufferHandle();
copy_to_host->set_host_buffer_handle(host_buffer_handle);
const std::vector<int64_t> expected_byte_strides_vec = {8, 40, 120};
const std::optional<absl::Span<const int64_t>> expected_byte_strides =
absl::Span<const int64_t>(expected_byte_strides_vec);
EXPECT_CALL(*array, CopyToHostBuffer(_, expected_byte_strides, _))
.WillOnce(Return(Future<>(absl::OkStatus())));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(host_buffer_store_->Lookup(host_buffer_handle),
IsOkAndHolds(Pointee(SizeIs(480))));
}
TEST_P(IfrtBackendHandlerTest, CopyToHostFailsWithNonExistentArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
ifrt_request->mutable_copy_to_host_buffer_request()));
ifrt_request->mutable_copy_to_host_buffer_request()->set_array_handle(0);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
DisassembleIntoSingleArrayFailsWhenBackendRuntimeFails) {
constexpr absl::string_view kDisassembleErrorMessage =
"Some test-injected error message that is unlikely to match other error "
"messages - 1234";
tsl::RCReference<xla::ifrt::MockArray> source_mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*source_mock_array, DisassembleIntoSingleDeviceArrays(_))
.WillOnce(Return(absl::UnknownError(kDisassembleErrorMessage)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(source_mock_array)));
auto disassemble_request = NewIfrtRequest(NewOpId());
disassemble_request->mutable_disassemble_into_single_device_arrays_request()
->set_array_handle(array_handle);
ASSERT_THAT(
CallBackend(std::move(disassemble_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq(kDisassembleErrorMessage)));
}
MATCHER_P(EqualsDeviceList, device_list, "") { return *arg == *device_list; }
TEST_P(IfrtBackendHandlerTest, CopyArrays) {
std::vector<tsl::RCReference<xla::ifrt::Array>> src_arrays;
src_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
std::vector<tsl::RCReference<xla::ifrt::Array>> copied_arrays;
copied_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
BasicDeviceList::Devices ds;
TF_ASSERT_OK_AND_ASSIGN(ds.emplace_back(),
mock_client_->LookupDevice(DeviceId(1)));
tsl::RCReference<DeviceList> devices = BasicDeviceList::Create(std::move(ds));
MemoryKind memory_kind("device");
EXPECT_CALL(*mock_client_, CopyArrays(ElementsAreArray(src_arrays),
Optional(EqualsDeviceList(devices)),
Optional(memory_kind),
ArrayCopySemantics::kAlwaysCopy))
.WillOnce(Return(
std::vector<tsl::RCReference<xla::ifrt::Array>>(copied_arrays)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* copy_arrays_request = ifrt_request->mutable_copy_arrays_request();
for (const auto& src_array : src_arrays) {
TF_ASSERT_OK_AND_ASSIGN(auto src_array_handle, MakeTestArray(src_array));
copy_arrays_request->add_array_handles(src_array_handle);
}
for (const auto& device : devices->devices()) {
copy_arrays_request->add_device_ids(device->Id().value());
}
copy_arrays_request->set_memory_kind(std::string(*memory_kind.memory_kind()));
copy_arrays_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(response->response_metadata().status()),
IsOk());
EXPECT_THAT(response->copy_arrays_response().array_handles(),
SizeIs(copied_arrays.size()));
}
TEST_P(IfrtBackendHandlerTest, ReshardSuccess) {
auto src_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(0)));
auto src_sharding = SingleDeviceSharding::Create(device, MemoryKind());
ON_CALL(*src_mock_array, sharding()).WillByDefault(ReturnRef(*src_sharding));
TF_ASSERT_OK_AND_ASSIGN(auto src_array_handle,
MakeTestArray(std::move(src_mock_array)));
auto copied_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_client_, CopyArrays(_, _, _, _))
.WillOnce(Return(std::vector<tsl::RCReference<xla::ifrt::Array>>(
{copied_mock_array})));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(src_array_handle);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto* new_device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request->mutable_reshard_request()->mutable_sharding(),
SingleDeviceSharding::Create(new_device, MemoryKind())->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(response->response_metadata().status()),
IsOk());
EXPECT_NE(response->reshard_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, ReshardFailsWhenTheBackendFails) {
auto mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
auto sharding = SingleDeviceSharding::Create(device, MemoryKind());
ON_CALL(*mock_array, sharding()).WillByDefault(ReturnRef(*sharding));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
EXPECT_CALL(*mock_client_, CopyArrays(_, _, _, _))
.WillOnce(Return(absl::UnknownError("injected error")));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(array_handle);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto* new_device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request->mutable_reshard_request()->mutable_sharding(),
SingleDeviceSharding::Create(new_device, MemoryKind())->ToProto());
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
TEST_P(IfrtBackendHandlerTest, ReshardFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(0);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
reshard_request->mutable_sharding();
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest, FullyReplicatedShardSuccess) {
auto fully_replicated_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
auto resultant_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*fully_replicated_mock_array, FullyReplicatedShard(_))
.WillOnce(Return(std::move(resultant_array)));
TF_ASSERT_OK_AND_ASSIGN(
auto fully_replicated_array_handle,
MakeTestArray(std::move(fully_replicated_mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(
fully_replicated_array_handle);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->fully_replicated_shard_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, FullyReplicatedShardFailure) {
auto fully_replicated_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*fully_replicated_mock_array, FullyReplicatedShard(_))
.WillOnce(Return(absl::UnknownError("injected error")));
TF_ASSERT_OK_AND_ASSIGN(
auto fully_replicated_array_handle,
MakeTestArray(std::move(fully_replicated_mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(
fully_replicated_array_handle);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
TEST_P(IfrtBackendHandlerTest,
FullyReplicatedShardFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(0);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
CheckArrayReadyRequestRelaysTheResultFromBackend) {
auto mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
EXPECT_CALL(*mock_client_, GetReadyFuture(_))
.WillOnce(Return(Future<>(absl::OkStatus())))
.WillOnce(Return(Future<>(absl::UnknownError("injected error"))));
{
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_check_value_ready_request()->add_value_handles(
array_handle);
TF_ASSERT_OK_AND_ASSIGN(auto ifrt_response,
CallBackend(std::move(ifrt_request)));
EXPECT_THAT(ifrt_response->response_metadata().status().code(),
tensorflow::error::OK);
EXPECT_TRUE(ifrt_response->has_check_value_ready_response());
}
{
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_check_value_ready_request()->add_value_handles(
array_handle);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
}
TEST_P(IfrtBackendHandlerTest,
CheckArrayReadyRequestFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_check_value_ready_request()->add_value_handles(0);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest, DeleteArraySuccess) {
auto mock_array1 = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array1, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
auto mock_array2 = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array2, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle1,
MakeTestArray(std::move(mock_array1)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle2,
MakeTestArray(std::move(mock_array2)));
uint64_t op_id = NewOpId();
auto ifrt_request = NewIfrtRequest(op_id);
ifrt_request->mutable_delete_array_request()->add_array_handle(array_handle1);
ifrt_request->mutable_delete_array_request()->add_array_handle(array_handle2);
TF_ASSERT_OK_AND_ASSIGN(auto resp, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(resp->response_metadata().status()), IsOk());
TF_EXPECT_OK(
CheckFuture(resp->delete_array_response().deletion_future_handle()));
}
TEST_P(IfrtBackendHandlerTest,
DeleteArrayReturnsFutureWithNonExistentArrayHandle) {
auto mock_array1 = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array1, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
TF_ASSERT_OK_AND_ASSIGN(auto real_handle,
MakeTestArray(std::move(mock_array1)));
constexpr int kBadHandle = 400;
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_delete_array_request()->add_array_handle(real_handle);
ifrt_request->mutable_delete_array_request()->add_array_handle(kBadHandle);
TF_ASSERT_OK_AND_ASSIGN(auto resp, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(
CheckFuture(resp->delete_array_response().deletion_future_handle()),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
IsDeleteRelaysBackTheReturnValueFromBackendRuntime) {
tsl::RCReference<xla::ifrt::MockArray> mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array, IsDeleted())
.WillOnce(Return(true))
.WillOnce(Return(false));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_is_array_deleted_request()->set_array_handle(
array_handle);
TF_ASSERT_OK_AND_ASSIGN(auto resp, CallBackend(std::move(ifrt_request)));
EXPECT_TRUE(resp->is_array_deleted_response().deleted());
ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_is_array_deleted_request()->set_array_handle(
array_handle);
TF_ASSERT_OK_AND_ASSIGN(resp, CallBackend(std::move(ifrt_request)));
EXPECT_FALSE(resp->is_array_deleted_response().deleted());
}
TEST_P(IfrtBackendHandlerTest, IsDeleteFailsForNonExistentArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_is_array_deleted_request()->set_array_handle(0);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest, DestructArrayTest) {
tsl::RCReference<xla::ifrt::MockArray> mock_array1 =
tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto array_handle1,
MakeTestArray(std::move(mock_array1)));
tsl::RCReference<xla::ifrt::MockArray> mock_array2 =
tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto array_handle2,
MakeTestArray(std::move(mock_array2)));
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_destruct_array_request()->add_array_handle(
array_handle1);
ifrt_request->mutable_destruct_array_request()->add_array_handle(
array_handle2);
TF_ASSERT_OK_AND_ASSIGN(auto ifrt_resp, CallBackend(std::move(ifrt_request)));
EXPECT_TRUE(ifrt_resp->has_destruct_array_response());
ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_destruct_array_request()->add_array_handle(
array_handle1);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, CompileSuccess) {
std::vector<MockDevice> devices(4);
for (int i = 0; i < 4; ++i) {
EXPECT_CALL(devices[i], Id()).WillOnce(Return(DeviceId(i)));
}
std::vector<xla::ifrt::Device*> addressable_devices;
for (int i = 0; i < 4; ++i) {
addressable_devices.push_back(&devices[i]);
}
auto executable = std::make_unique<MockLoadedExecutable>();
EXPECT_CALL(*executable, name()).WillOnce(Return("executable_name"));
EXPECT_CALL(*executable, num_devices()).WillOnce(Return(4));
EXPECT_CALL(*executable, addressable_devices())
.WillOnce(Return(absl::MakeSpan(addressable_devices)));
EXPECT_CALL(*executable, Fingerprint()).WillOnce(Return("fingerprint"));
EXPECT_CALL(*executable, GetReadyFuture())
.WillOnce(Return(Future<>(absl::OkStatus())));
ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(executable)));
EXPECT_THAT(response, Partially(EquivToProto(R"pb(
name: "executable_name"
num_devices: 4
addressable_device_ids: [ 0, 1, 2, 3 ]
fingerprint_value: "fingerprint"
)pb")));
TF_EXPECT_OK(CheckFuture(response.ready_future_handle()));
}
#endif
TEST_P(IfrtBackendHandlerTest, CompileFailure) {
ASSERT_THAT(
CompileTestLoadedExecutable(absl::InternalError("injected error")),
StatusIs(absl::StatusCode::kInternal, StrEq("injected error")));
}
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, LoadedExecutableMetadata) {
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
{
OpSharding op_sharding1;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(type: REPLICATED)pb", &op_sharding1));
OpSharding op_sharding2;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(type: OTHER
tile_shape {
element_type: BF16
dimensions: [ 2, 2 ]
}
tile_assignment_dimensions: [ 0, 1 ])pb",
&op_sharding2));
EXPECT_CALL(*executable, GetParameterShardings())
.WillOnce(Return(std::vector<OpSharding>{op_sharding1, op_sharding2}));
EXPECT_CALL(*executable, GetOutputShardings())
.WillOnce(Return(std::vector<OpSharding>{op_sharding1}));
std::vector<std::unique_ptr<xla::PjRtLayout>> parameter_layouts;
parameter_layouts.push_back(std::make_unique<xla::PjRtXlaLayout>(
xla::LayoutUtil::MakeDescendingLayout(1)));
parameter_layouts.push_back(std::make_unique<xla::PjRtXlaLayout>(
xla::LayoutUtil::MakeDescendingLayout(2)));
EXPECT_CALL(*executable, GetParameterLayouts())
.WillOnce(Return(std::move(parameter_layouts)));
std::vector<std::unique_ptr<xla::PjRtLayout>> output_layouts;
output_layouts.push_back(std::make_unique<xla::PjRtXlaLayout>(
xla::LayoutUtil::MakeDescendingLayout(2)));
EXPECT_CALL(*executable, GetOutputLayouts())
.WillOnce(Return(std::move(output_layouts)));
EXPECT_CALL(*executable, GetOutputMemoryKinds())
.WillOnce(Return(std::vector<std::vector<absl::string_view>>{{"foo"}}));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableMetadataRequest* metadata_request =
request->mutable_loaded_executable_metadata_request();
metadata_request->set_loaded_executable_handle(handle);
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(Partially(EquivToProto(R"pb(
loaded_executable_metadata_response {
parameter_shardings {
shardings { type: REPLICATED }
shardings {
type: OTHER
tile_shape {
element_type: BF16
dimensions: [ 2, 2 ]
}
tile_assignment_dimensions: [ 0, 1 ]
}
}
output_shardings { shardings { type: REPLICATED } }
parameter_layouts_list {
layouts { minor_to_major: 0 }
layouts { minor_to_major: [ 1, 0 ] }
}
output_layouts_list { layouts { minor_to_major: [ 1, 0 ] } }
output_memory_kinds {
memory_kind_lists { memory_kinds: [ "foo" ] }
}
}
)pb")))));
}
{
EXPECT_CALL(*executable, GetParameterShardings())
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*executable, GetOutputShardings())
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*executable, GetParameterLayouts())
.WillOnce(Return(absl::UnimplementedError("unimplemented")));
EXPECT_CALL(*executable, GetOutputLayouts())
.WillOnce(Return(absl::UnimplementedError("unimplemented")));
EXPECT_CALL(*executable, GetOutputMemoryKinds())
.WillOnce(Return(std::vector<std::vector<absl::string_view>>{}));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableMetadataRequest* metadata_request =
request->mutable_loaded_executable_metadata_request();
metadata_request->set_loaded_executable_handle(handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
const auto& metadata_response =
response->loaded_executable_metadata_response();
EXPECT_FALSE(metadata_response.has_parameter_shardings());
EXPECT_FALSE(metadata_response.has_output_shardings());
EXPECT_TRUE(metadata_response.has_parameter_layouts_error());
EXPECT_TRUE(metadata_response.has_output_layouts_error());
}
}
#endif
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, LoadedExecutableExecute) {
MockDevice device;
ON_CALL(device, Id()).WillByDefault(Return(DeviceId(0)));
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
constexpr int kNumArgs = 3;
constexpr int kNumOutputs = 2;
Shape shape({2, 2});
auto sharding = SingleDeviceSharding::Create(&device, MemoryKind());
auto make_array = [&]() {
auto array = tsl::MakeRef<MockArray>();
ON_CALL(*array, dtype()).WillByDefault(Return(DType(DType::kF32)));
ON_CALL(*array, shape()).WillByDefault(ReturnRef(shape));
ON_CALL(*array, sharding()).WillByDefault(ReturnRef(*sharding));
return array;
};
std::vector<tsl::RCReference<Array>> outputs;
outputs.reserve(kNumOutputs);
for (int i = 0; i < kNumOutputs; ++i) {
outputs.push_back(make_array());
}
EXPECT_CALL(*executable, Execute(SizeIs(kNumArgs), _, _))
.WillOnce(
Invoke([&](absl::Span<tsl::RCReference<Array>> args,
const xla::ifrt::LoadedExecutable::ExecuteOptions& options,
std::optional<tsl::RCReference<DeviceList>> devices)
-> absl::StatusOr<LoadedExecutable::ExecuteResult> {
return LoadedExecutable::ExecuteResult{
.status = Future<>(absl::InternalError("injected error")),
.outputs = outputs,
};
}));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableExecuteRequest* execute_request =
request->mutable_loaded_executable_execute_request();
for (int i = 0; i < kNumArgs; ++i) {
TF_ASSERT_OK_AND_ASSIGN(uint64_t arg_handle, MakeTestArray(make_array()));
execute_request->add_args_handles(arg_handle);
}
execute_request->set_loaded_executable_handle(handle);
xla::ifrt::LoadedExecutable::ExecuteOptions execute_options;
execute_options.fill_status = true;
TF_ASSERT_OK_AND_ASSIGN(*execute_request->mutable_execute_options(),
execute_options.ToProto());
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
EXPECT_THAT(response, Pointee(Partially(EquivToProto(R"pb(
loaded_executable_execute_response {
outputs {
dtype { kind: KIND_F32 }
shape { dims: [ 2, 2 ] }
}
outputs {
dtype { kind: KIND_F32 }
shape { dims: [ 2, 2 ] }
}
}
)pb"))));
TF_ASSERT_OK_AND_ASSIGN(
auto sharding_proto,
SingleDeviceSharding::Create(&device, MemoryKind())->ToProto());
for (const auto& output :
response->loaded_executable_execute_response().outputs()) {
EXPECT_THAT(output.sharding(), EquivToProto(sharding_proto));
EXPECT_NE(output.array_handle(), 0);
}
EXPECT_THAT(
CheckFuture(
response->loaded_executable_execute_response().status_handle()),
StatusIs(absl::StatusCode::kInternal, StrEq("injected error")));
EXPECT_THAT(
CheckFuture(
response->loaded_executable_execute_response().status_handle()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("Unknown future handle")));
}
#endif
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, LoadedExecutableDelete) {
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
{
EXPECT_CALL(*executable, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableDeleteRequest* delete_request =
request->mutable_loaded_executable_delete_request();
delete_request->set_loaded_executable_handle(handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_executable_delete_response());
EXPECT_THAT(
CheckFuture(
response->loaded_executable_delete_response().future_handle()),
IsOk());
}
{
EXPECT_CALL(*executable, IsDeleted()).WillOnce(Return(true));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableIsDeletedRequest* is_deleted_request =
request->mutable_loaded_executable_is_deleted_request();
is_deleted_request->set_loaded_executable_handle(handle);
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(Partially(EquivToProto(R"pb(
loaded_executable_is_deleted_response { is_deleted: true }
)pb")))));
}
}
#endif
TEST_P(IfrtBackendHandlerTest, LoadedExecutableDestruct) {
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
{
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableDestructRequest* destruct_request =
request->mutable_loaded_executable_destruct_request();
destruct_request->set_loaded_executable_handle(handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_executable_destruct_response());
}
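  // A second destruct request for the same handle must fail, since the
  // first request removed it from the server.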
{
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableDestructRequest* destruct_request =
request->mutable_loaded_executable_destruct_request();
destruct_request->set_loaded_executable_handle(handle);
EXPECT_THAT(CallBackend(std::move(request)),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("Unknown loaded executable handle")));
}
}
TEST_P(IfrtBackendHandlerTest, LoadedHostCallbackExecute) {
std::vector<xla::HostCallbackArgInfo> hcb_args = {{
.channel_id = 1,
.shape = xla::ShapeUtil::MakeShape(xla::F32, {}),
}};
std::vector<xla::HostCallbackArgInfo> hcb_results = {{
.channel_id = 2,
.shape = xla::ShapeUtil::MakeShape(xla::F32, {}),
}};
auto hcb = tsl::MakeRef<RemoteLoadedHostCallback>(
mock_client_, std::move(hcb_args), std::move(hcb_results),
nullptr);
MockLoadedExecutable* executable;
tsl::RCReference<xla::ifrt::LoadedHostCallback> loaded_host_callback;
uint64_t loaded_host_callback_handle;
{
auto request = NewIfrtRequest(NewOpId());
CompileRequest* compile_request = request->mutable_compile_request();
TestProgram program;
TF_ASSERT_OK_AND_ASSIGN(*compile_request->mutable_program(),
Serialize(program));
xla::ifrt::XlaCompileOptions compile_options;
TF_ASSERT_OK_AND_ASSIGN(*compile_request->mutable_compile_options(),
Serialize(compile_options));
TF_ASSERT_OK_AND_ASSIGN(std::string host_callback_serialized,
hcb->Serialize());
compile_request->add_host_callbacks(std::move(host_callback_serialized));
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
EXPECT_CALL(mock_compiler_, Compile(_, _))
.WillOnce(DoAll(
Invoke(
[&](const std::unique_ptr<xla::ifrt::Program>& program,
const std::unique_ptr<xla::ifrt::CompileOptions>& options) {
auto* xla_compile_options =
llvm::cast<xla::ifrt::XlaCompileOptions>(options.get());
auto& loaded_host_callbacks =
xla_compile_options->loaded_host_callbacks;
ASSERT_EQ(loaded_host_callbacks.size(), 1);
loaded_host_callback = loaded_host_callbacks.front();
}),
Return(ByMove(std::move(e)))));
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_compile_response());
CompileResponse compile_response = response->compile_response();
loaded_host_callback_handle =
compile_response.loaded_host_callback_handles(0);
ASSERT_THAT(loaded_host_callback, NotNull());
}
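  // Drive the host callback the way XLA would during execution: pass a 1.0f
  // operand and block until the remote side (simulated below) returns 2.0f.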
auto host_callback_thread = absl::WrapUnique(tsl::Env::Default()->StartThread(
tsl::ThreadOptions(), "HostCallback", [&]() {
xla::Literal x = xla::LiteralUtil::CreateR0(1.0f);
std::vector<void*> operands;
operands.push_back(x.untyped_data());
xla::Literal out = xla::LiteralUtil::CreateR0(0.0f);
std::vector<void*> results;
results.push_back(out.untyped_data());
const xla::HostCallback* xla_host_callback =
&llvm::cast<RemoteLoadedHostCallback>(loaded_host_callback.get())
->host_callback();
ASSERT_THAT(
xla_host_callback->callback(results.data(), operands.data()),
IsOk());
EXPECT_EQ(out, xla::LiteralUtil::CreateR0(2.0f));
}));
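  // Poll for the pending execution; the server stores the callback's operand
  // literal in the host buffer store under the handle supplied here.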
uint64_t host_callback_execution_handle;
{
const uint64_t operand_host_buffer_handle = NewHostBufferHandle();
auto request = NewIfrtRequest(NewOpId());
LoadedHostCallbackPollRequest* poll_request =
request->mutable_loaded_host_callback_poll_request();
poll_request->set_loaded_host_callback_handle(loaded_host_callback_handle);
poll_request->set_operand_host_buffer_handle(operand_host_buffer_handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_host_callback_poll_response());
const LoadedHostCallbackPollResponse& poll_response =
response->loaded_host_callback_poll_response();
host_callback_execution_handle =
poll_response.host_callback_execution_handle();
TF_ASSERT_OK_AND_ASSIGN(
const std::shared_ptr<const std::string> operands,
host_buffer_store_->Lookup(operand_host_buffer_handle));
EXPECT_EQ(xla::BorrowingLiteral(operands->data(),
xla::ShapeUtil::MakeShape(xla::F32, {})),
xla::LiteralUtil::CreateR0(1.0f));
}
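  // Return the 2.0f result through the buffer store, unblocking the callback
  // invocation running in the host callback thread above.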
{
auto result = xla::LiteralUtil::CreateR0(2.0f);
std::string result_buffer(absl::string_view(
static_cast<const char*>(result.untyped_data()), result.size_bytes()));
const uint64_t result_host_buffer_handle = NewHostBufferHandle();
ASSERT_THAT(host_buffer_store_->Store(result_host_buffer_handle,
std::move(result_buffer)),
IsOk());
auto request = NewIfrtRequest(NewOpId());
LoadedHostCallbackReturnRequest* ret_request =
request->mutable_loaded_host_callback_return_request();
ret_request->set_host_callback_execution_handle(
host_callback_execution_handle);
ret_request->set_result_host_buffer_handle(result_host_buffer_handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_host_callback_return_response());
}
}
TEST_P(IfrtBackendHandlerTest, GetDefaultDeviceAssignmentSuccess) {
const int kNumReplicas = 1;
const int kNumPartitions = 3;
EXPECT_CALL(*mock_client_,
GetDefaultDeviceAssignment(kNumReplicas, kNumPartitions))
.WillOnce(Return(xla::DeviceAssignment(kNumReplicas, kNumPartitions)));
auto request = NewIfrtRequest(NewOpId());
auto* default_device_assignment_request =
request->mutable_get_default_device_assignment_request();
default_device_assignment_request->set_num_replicas(kNumReplicas);
default_device_assignment_request->set_num_partitions(kNumPartitions);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(request)));
TF_ASSERT_OK_AND_ASSIGN(auto assignment_got,
xla::DeviceAssignment::Deserialize(
response->get_default_device_assignment_response()
.device_assignment()));
EXPECT_EQ(assignment_got->replica_count(), kNumReplicas);
EXPECT_EQ(assignment_got->computation_count(), kNumPartitions);
}
TEST_P(IfrtBackendHandlerTest,
GetDefaultDeviceAssignmentFailsIfTheBackendFails) {
const int kNumReplicas = 1;
const int kNumPartitions = 3;
EXPECT_CALL(*mock_client_,
GetDefaultDeviceAssignment(kNumReplicas, kNumPartitions))
.WillOnce(Return(absl::UnknownError("injected error")));
auto request = NewIfrtRequest(NewOpId());
auto* default_device_assignment_request =
request->mutable_get_default_device_assignment_request();
default_device_assignment_request->set_num_replicas(kNumReplicas);
default_device_assignment_request->set_num_partitions(kNumPartitions);
EXPECT_THAT(CallBackend(std::move(request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
INSTANTIATE_TEST_SUITE_P(
IfrtBackendHandlerTestWithAllVersions, IfrtBackendHandlerTest,
testing::Range(kServerMinVersion, kServerMaxVersion + 1),
[](const testing::TestParamInfo<IfrtBackendHandlerTest::ParamType>& info) {
return absl::StrCat(info.param);
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a1e9b2d-2600-48e3-99d7-97d71167687b | cpp | google/libaddressinput | region_data | cpp/src/region_data.cc | cpp/test/region_data_test.cc | #include <libaddressinput/region_data.h>
#include <cstddef>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
RegionData::RegionData(const std::string& region_code)
: key_(region_code),
name_(region_code),
parent_(nullptr),
sub_regions_() {}
RegionData::~RegionData() {
for (auto ptr : sub_regions_) {
delete ptr;
}
}
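// Sub-regions are owned by their parent and deleted in ~RegionData().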
RegionData* RegionData::AddSubRegion(const std::string& key,
const std::string& name) {
auto* sub_region = new RegionData(key, name, this);
sub_regions_.push_back(sub_region);
return sub_region;
}
RegionData::RegionData(const std::string& key,
const std::string& name,
RegionData* parent)
: key_(key), name_(name), parent_(parent), sub_regions_() {}
}
} | #include <libaddressinput/region_data.h>
#include <cstddef>
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::RegionData;
TEST(RegionDataTest, NoParentByDefault) {
static const std::string kEmpty;
RegionData region(kEmpty);
EXPECT_FALSE(region.has_parent());
}
TEST(RegionDataTest, NoSubRegionsByDefault) {
static const std::string kEmpty;
RegionData region(kEmpty);
EXPECT_TRUE(region.sub_regions().empty());
}
TEST(RegionDataTest, SubRegionGetsParent) {
static const std::string kEmpty;
RegionData region(kEmpty);
region.AddSubRegion(kEmpty, kEmpty);
ASSERT_EQ(1U, region.sub_regions().size());
ASSERT_TRUE(region.sub_regions()[0] != nullptr);
EXPECT_EQ(®ion, ®ion.sub_regions()[0]->parent());
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/region_data.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/region_data_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
36f85540-8919-4e0b-b082-182c3285944e | cpp | tensorflow/tensorflow | trt_engine_op | tensorflow/compiler/tf2tensorrt/ops/trt_engine_op.cc | tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
REGISTER_OP("TRTEngineOp")
.Attr("serialized_segment: string")
.Attr("segment_func: func = {}")
.Attr("InT: list({bool,int8,float16,float32,int32,resource})")
.Attr("OutT: list({bool,int8,float16,float32,int32})")
.Attr("input_shapes: list(shape) = []")
.Attr("output_shapes: list(shape) = []")
.Attr("max_cached_engines_count: int = 1")
.Attr("max_batch_size: int = 1")
.Attr("workspace_size_bytes: int")
.Attr("precision_mode: {'FP32', 'FP16', 'INT8'}")
.Attr("calibration_data: string = ''")
.Attr("use_calibration: bool = true")
.Input("in_tensor: InT")
.Output("out_tensor: OutT")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
std::vector<tensorflow::PartialTensorShape> output_shapes;
TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
for (int i = 0; i < output_shapes.size(); i++) {
      shape_inference::ShapeHandle output_shape_handle;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(
output_shapes[i], &output_shape_handle));
c->set_output(i, output_shape_handle);
}
return OkStatus();
})
.Attr("segment_funcdef_name: string = ''")
.Attr("cached_engine_batches: list(int) >= 0 = []")
.Attr("fixed_input_size: bool = true")
.Attr("static_engine: bool = true")
.Attr("profile_strategy: string = ''")
.Attr("use_explicit_precision: bool = false");
}
#endif | #include <array>
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_graph.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "xla/tsl/framework/fixedpoint/FixedPoint.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/public/version.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
using ::absl::StrCat;
using ::testing::ElementsAre;
struct TestParam {
bool static_engine;
};
class TRTEngineOpTestBase : public OpsTestBase {
public:
void AddSimpleTrtOp(DataType dtype, int max_cached_engines_count = 1,
PartialTensorShape shape = PartialTensorShape({-1, -1}),
bool use_implicit_batch = true,
bool allow_build_at_runtime = true,
bool static_engine = false) {
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("GPU", {}, "/job:worker/replica:0/task:0"));
Scope s = Scope::NewRootScope();
auto feed = ops::_Arg(s.WithOpName("TensorRTInputPH_0"), dtype, 0);
auto add = ops::Add(s.WithOpName("add"), feed, feed);
ops::_Retval give_me_a_name(s.WithOpName("TensorRTOutputPH_0"), add, 0);
GraphDef graph_def;
TF_ASSERT_OK(s.ToGraphDef(&graph_def));
Graph* graph = s.graph();
TF_ASSERT_OK(convert::RegisterGraphToFunctionLibrary(graph_def, graph,
std::string(kOpName)));
TF_ASSERT_OK(flib_def_->AddLibrary(graph->flib_def()));
string segment_string;
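    // For a static engine, build and serialize a TensorRT engine up front so
    // it can be embedded in the op's "serialized_segment" attribute below.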
if (static_engine) {
convert::TRTOptimizationPass::ConversionParams params;
convert::EngineInfo info;
info.segment_graph_def.CopyFrom(graph_def);
info.precision_mode = TrtPrecisionMode::FP32;
info.max_workspace_size_bytes = 1 << 20;
info.engine_name = "TRTEngineOP_000_000";
params.use_implicit_batch = use_implicit_batch;
params.trt_logger_name = "DefaultLogger";
TrtShapeOptimizationProfile profile;
std::vector<bool> input_mask = {true};
profile.SetInputMask(input_mask);
TensorShape my_shape;
TF_CHECK_OK(
TensorShapeUtils::MakeShape(std::vector<int32>{4, 2}, &my_shape));
profile.AddShape({my_shape, {}});
TF_CHECK_OK(
TensorShapeUtils::MakeShape(std::vector<int32>{1, 2}, &my_shape));
profile.AddShape({my_shape, {}});
profile.InitProfiles({shape}, ProfileStrategy::kOptimal);
std::vector<PartialTensorShape> shape_vec{shape, {}};
TF_CHECK_OK(convert::CreateStaticEngine(
params, info, 1, shape_vec, &profile, &segment_string, nullptr));
}
OpsTestBase::SetDevice(DEVICE_GPU, std::move(device));
NameAttrList function;
function.set_name(StrCat(std::string(kOpName), "_native_segment"));
TF_ASSERT_OK(NodeDefBuilder(std::string(kOpName), "TRTEngineOp")
.Input(FakeInput(1, dtype))
.Attr("input_shapes", {shape})
.Attr("output_shapes", {shape})
.Attr("static_engine", static_engine)
.Attr("segment_func", function)
.Attr("serialized_segment", segment_string)
.Attr("calibration_data", "")
.Attr("max_cached_engines_count", max_cached_engines_count)
.Attr("workspace_size_bytes", 1 << 20)
.Attr("precision_mode", "FP32")
.Attr("use_calibration", false)
.Attr("profile_strategy", "optimal")
.Attr("_use_implicit_batch", use_implicit_batch)
.Attr("_allow_build_at_runtime", allow_build_at_runtime)
.Attr("_allow_soft_placement", false)
.Attr("OutT", {dtype})
.Finalize(OpsTestBase::node_def()));
TF_ASSERT_OK(InitOpWithFunctionLibrary());
}
static const absl::string_view kOpName;
template <typename T>
void AddSimpleInput(const TensorShape& shape) {
std::vector<T> input(shape.num_elements());
std::iota(input.begin(), input.end(), T(0));
OpsTestBase::AddInputFromArray<T>(shape, input);
}
void ResetInputs() {
inputs_.clear();
for (auto& temp : tensors_) {
delete temp;
}
tensors_.clear();
}
private:
Status InitOpWithFunctionLibrary() {
OpKernel* kernel = nullptr;
auto flr = pflr_->GetFLR(device_->name());
std::shared_ptr<const NodeProperties> props;
Status status = NodeProperties::CreateFromNodeDef(
node_def_, flr->GetFunctionLibraryDefinition(), &props);
if (status.ok()) {
status.Update(CreateOpKernel(device_type_, device_, allocator(), flr,
props, TF_GRAPH_DEF_VERSION, &kernel));
}
kernel_ = std::unique_ptr<OpKernel>(kernel);
if (kernel_ != nullptr) input_types_ = kernel_->input_types();
return status;
}
};
class TRTEngineOpTestWithParam
: public TRTEngineOpTestBase,
public ::testing::WithParamInterface<TestParam> {
public:
TRTEngineOpTestWithParam() : param_(GetParam()) {}
protected:
TestParam param_;
};
const absl::string_view TRTEngineOpTestBase::kOpName = "myop";
constexpr std::array<TestParam, 2> TestParameters{TestParam{false},
TestParam{true}};
INSTANTIATE_TEST_CASE_P(TRTEngineOpTestInstantiation, TRTEngineOpTestWithParam,
::testing::ValuesIn(TestParameters));
TEST_F(TRTEngineOpTestBase, DynamicEngines) {
TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, 4);
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({2, 2}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
ResetInputs();
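  // In implicit-batch mode the engine built for batch size 2 also serves
  // batch size 1, so no new cache entry should appear.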
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({1, 2}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(1, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({3, 2}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(2, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
EXPECT_EQ(1, cache->count({TensorShape({3, 2})}));
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({10, 10}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({1, 10}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(3, cache->size());
EXPECT_EQ(1, cache->count({TensorShape({2, 2})}));
EXPECT_EQ(1, cache->count({TensorShape({3, 2})}));
EXPECT_EQ(1, cache->count({TensorShape({10, 10})}));
}
TEST_F(TRTEngineOpTestBase, AllowBuildAtRuntime) {
TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, 1,
PartialTensorShape({-1, -1}),
true,
false);
TensorShape input_shape({2, 2});
TRTEngineOpTestBase::AddSimpleInput<float>(input_shape);
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
ASSERT_EQ(1, cache->count({input_shape}));
EngineContext* ectx = cache->at({input_shape}).get();
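  // With _allow_build_at_runtime=false no engine could be built, so the
  // cached EngineContext holds no TensorRT engine.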
EXPECT_EQ(ectx->GetCudaEngine(), nullptr);
}
TEST_P(TRTEngineOpTestWithParam, ExplicitBatch) {
TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, 1,
PartialTensorShape({1, 2}),
false,
true,
param_.static_engine);
TensorShape input_shape({1, 2});
TRTEngineOpTestBase::AddSimpleInput<float>(input_shape);
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
ASSERT_EQ(1, cache->count({input_shape}));
EngineContext* ectx = cache->at({input_shape}).get();
EXPECT_NE(ectx->GetCudaEngine(), nullptr);
}
TEST_P(TRTEngineOpTestWithParam, DynamicShapes) {
TRTEngineOpTestBase::AddSimpleTrtOp(DT_FLOAT, 1,
PartialTensorShape({-1, -1}),
false,
true,
param_.static_engine);
TensorShape input_shape({1, 2});
TRTEngineOpTestBase::AddSimpleInput<float>(input_shape);
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
TRTEngineCacheResource* cache_resource = nullptr;
TF_ASSERT_OK(device_->resource_manager()->Lookup(
std::string(kTfTrtContainerName), std::string(kOpName), &cache_resource));
core::ScopedUnref sc(cache_resource);
auto cache = &cache_resource->cache_;
EXPECT_EQ(1, cache->size());
ASSERT_EQ(1, cache->count({input_shape}));
EngineContext* ectx = cache->at({input_shape}).get();
EXPECT_NE(ectx->GetCudaEngine(), nullptr);
ResetInputs();
TRTEngineOpTestBase::AddSimpleInput<float>(TensorShape({1, 37}));
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
EXPECT_EQ(1, cache->size());
EXPECT_EQ(0, cache->count({TensorShape({1, 37})}));
}
template <typename T>
class TRTEngineOpTest : public TRTEngineOpTestBase {};
using TypeList = ::testing::Types<float, Eigen::half>;
TYPED_TEST_SUITE(TRTEngineOpTest, TypeList);
TYPED_TEST(TRTEngineOpTest, Basic) {
TRTEngineOpTestBase::AddSimpleTrtOp(DataTypeToEnum<TypeParam>::v());
OpsTestBase::AddInputFromArray<TypeParam>(TensorShape({1, 2}),
{TypeParam(0.0f), TypeParam(1.0f)});
TF_ASSERT_OK(OpsTestBase::RunOpKernel());
Tensor* output = OpsTestBase::GetOutput(0);
EXPECT_THAT(
absl::Span<const TypeParam>(output->template flat<TypeParam>().data(),
output->NumElements()),
ElementsAre(TypeParam(0.0f), TypeParam(2.0f)));
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/ops/trt_engine_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cb0e2bda-0e2c-4bc6-b373-3abf2587c040 | cpp | google/tensorstore | image_writer | tensorstore/internal/image/image_writer.h | tensorstore/internal/image/image_writer_test.cc | #ifndef TENSORSTORE_INTERNAL_IMAGE_IMAGE_WRITER_H_
#define TENSORSTORE_INTERNAL_IMAGE_IMAGE_WRITER_H_
#include "absl/status/status.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_image {
class ImageWriter {
public:
virtual ~ImageWriter() = default;
virtual absl::Status Initialize(riegeli::Writer*) = 0;
virtual absl::Status Encode(
const ImageInfo& image,
tensorstore::span<const unsigned char> source) = 0;
virtual absl::Status Done() = 0;
};
}
}
#endif | #include "tensorstore/internal/image/image_writer.h"
#include <stddef.h>
#include <stdint.h>
#include <any>
#include <cmath>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/image/avif_reader.h"
#include "tensorstore/internal/image/avif_writer.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/image_reader.h"
#include "tensorstore/internal/image/image_view.h"
#include "tensorstore/internal/image/jpeg_reader.h"
#include "tensorstore/internal/image/jpeg_writer.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/internal/image/png_writer.h"
#include "tensorstore/internal/image/tiff_reader.h"
#include "tensorstore/internal/image/tiff_writer.h"
#include "tensorstore/internal/image/webp_reader.h"
#include "tensorstore/internal/image/webp_writer.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal_image::AvifReader;
using ::tensorstore::internal_image::AvifReaderOptions;
using ::tensorstore::internal_image::AvifWriter;
using ::tensorstore::internal_image::AvifWriterOptions;
using ::tensorstore::internal_image::ImageInfo;
using ::tensorstore::internal_image::ImageReader;
using ::tensorstore::internal_image::ImageView;
using ::tensorstore::internal_image::ImageWriter;
using ::tensorstore::internal_image::JpegReader;
using ::tensorstore::internal_image::JpegWriter;
using ::tensorstore::internal_image::JpegWriterOptions;
using ::tensorstore::internal_image::PngReader;
using ::tensorstore::internal_image::PngWriter;
using ::tensorstore::internal_image::PngWriterOptions;
using ::tensorstore::internal_image::TiffReader;
using ::tensorstore::internal_image::TiffWriter;
using ::tensorstore::internal_image::TiffWriterOptions;
using ::tensorstore::internal_image::WebPReader;
using ::tensorstore::internal_image::WebPReaderOptions;
using ::tensorstore::internal_image::WebPWriter;
using ::tensorstore::internal_image::WebPWriterOptions;
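// Extracts a T* from a std::any that may hold a T by value, a
// std::reference_wrapper<T>, or a std::reference_wrapper<const T>.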
template <typename T>
const T* GetPointerFromAny(std::any* any_ptr) {
if (!any_ptr->has_value()) {
return nullptr;
}
if (auto opt = std::any_cast<T>(any_ptr); opt != nullptr) {
return opt;
}
if (auto opt = std::any_cast<std::reference_wrapper<T>>(any_ptr);
opt != nullptr) {
return &(opt->get());
}
if (auto opt = std::any_cast<std::reference_wrapper<const T>>(any_ptr);
opt != nullptr) {
return &(opt->get());
}
return nullptr;
}
double ComputeRMSE(const unsigned char* a, const unsigned char* b, size_t c) {
double squared_error = 0;
for (size_t i = 0; i < c; ++i) {
    const double diff = static_cast<double>(a[i]) - static_cast<double>(b[i]);
squared_error += diff * diff;
}
return std::sqrt(squared_error / static_cast<double>(c));
}
void MakeTestImage(const ImageInfo& info,
tensorstore::span<unsigned char> data) {
ImageView image(info, data);
uint64_t lcg = info.width * info.height * info.num_components;
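  // 64-bit linear congruential generator (Knuth's MMIX multiplier) gives the
  // extra channels deterministic pseudo-random values.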
for (size_t y = 0; y < info.height; ++y) {
auto* row = image.data_row(y).data();
for (size_t x = 0; x < info.width; ++x) {
double gradient = static_cast<double>(x + y) /
static_cast<double>(info.width + info.height);
*row++ = static_cast<unsigned char>(gradient * 255);
if (info.num_components > 1) {
lcg = (lcg * 6364136223846793005) + 1;
*row++ = static_cast<unsigned char>(lcg);
}
if (info.num_components > 2) {
*row++ = (y & 1) ? static_cast<unsigned char>((1.0 - gradient) * 255)
: static_cast<unsigned char>(x);
}
if (info.num_components > 3) {
*row++ =
(y & 1)
? static_cast<unsigned char>(x)
: static_cast<unsigned char>(std::abs(128 - gradient * 255));
}
}
}
}
struct TestParam {
std::any options;
ImageInfo image_params;
double rmse_error_limit = 0;
std::any reader_options;
};
[[maybe_unused]] std::string PrintToString(const TestParam& p) {
return absl::StrCat(p.image_params.num_components,
p.rmse_error_limit != 0 ? "_rmse" : "");
}
class WriterTest : public ::testing::TestWithParam<TestParam> {
public:
WriterTest() {
std::any* options = const_cast<std::any*>(&GetParam().options);
if (GetPointerFromAny<TiffWriterOptions>(options)) {
writer = std::make_unique<TiffWriter>();
reader = std::make_unique<TiffReader>();
} else if (GetPointerFromAny<JpegWriterOptions>(options)) {
writer = std::make_unique<JpegWriter>();
reader = std::make_unique<JpegReader>();
} else if (GetPointerFromAny<PngWriterOptions>(options)) {
writer = std::make_unique<PngWriter>();
reader = std::make_unique<PngReader>();
} else if (GetPointerFromAny<AvifWriterOptions>(options)) {
writer = std::make_unique<AvifWriter>();
reader = std::make_unique<AvifReader>();
} else if (GetPointerFromAny<WebPWriterOptions>(options)) {
writer = std::make_unique<WebPWriter>();
reader = std::make_unique<WebPReader>();
}
}
absl::Status InitializeWithOptions(riegeli::Writer* riegeli_writer) {
std::any* options = const_cast<std::any*>(&GetParam().options);
if (auto* ptr = GetPointerFromAny<TiffWriterOptions>(options)) {
return reinterpret_cast<TiffWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<JpegWriterOptions>(options)) {
return reinterpret_cast<JpegWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<PngWriterOptions>(options)) {
return reinterpret_cast<PngWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<AvifWriterOptions>(options)) {
return reinterpret_cast<AvifWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<WebPWriterOptions>(options)) {
return reinterpret_cast<WebPWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
}
return writer->Initialize(riegeli_writer);
}
absl::Status DecodeWithOptions(tensorstore::span<unsigned char> dest) {
std::any* options = const_cast<std::any*>(&GetParam().reader_options);
if (auto* ptr = GetPointerFromAny<AvifReaderOptions>(options)) {
return reinterpret_cast<AvifReader*>(reader.get())->Decode(dest, *ptr);
}
return reader->Decode(dest);
}
std::unique_ptr<ImageWriter> writer;
std::unique_ptr<ImageReader> reader;
};
TEST_P(WriterTest, RoundTrip) {
ASSERT_FALSE(writer == nullptr);
ASSERT_FALSE(reader.get() == nullptr);
const ImageInfo source_info = GetParam().image_params;
std::vector<unsigned char> source(ImageRequiredBytes(source_info));
MakeTestImage(source_info, source);
absl::Cord encoded;
{
riegeli::CordWriter riegeli_writer(&encoded);
ASSERT_THAT(InitializeWithOptions(&riegeli_writer), ::tensorstore::IsOk());
ASSERT_THAT(writer->Encode(source_info, source), ::tensorstore::IsOk());
ASSERT_THAT(writer->Done(), ::tensorstore::IsOk());
}
ImageInfo decoded_info;
std::vector<unsigned char> decoded(source.size());
{
riegeli::CordReader cord_reader(&encoded);
ASSERT_THAT(reader->Initialize(&cord_reader), ::tensorstore::IsOk());
decoded_info = reader->GetImageInfo();
EXPECT_EQ(decoded_info.width, source_info.width);
EXPECT_EQ(decoded_info.height, source_info.height);
EXPECT_EQ(decoded_info.num_components, source_info.num_components);
EXPECT_THAT(DecodeWithOptions(decoded), ::tensorstore::IsOk());
}
double rmse = ComputeRMSE(decoded.data(), source.data(), source.size());
if (GetParam().rmse_error_limit == 0) {
EXPECT_EQ(0, rmse) << "\nA: " << source_info << " "
<< "\nB: " << decoded_info;
EXPECT_THAT(decoded, testing::Eq(source));
} else {
EXPECT_LT(rmse, GetParam().rmse_error_limit) << decoded_info;
}
}
INSTANTIATE_TEST_SUITE_P(
AvifLossless, WriterTest,
::testing::Values(
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 1}, 0},
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 2}, 0},
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 3}, 0},
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
AVifLossy, WriterTest,
::testing::Values(
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 1}, 0.26},
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 2}, 0.5},
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 3}, 28.5},
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 4}, 24.5}));
INSTANTIATE_TEST_SUITE_P(
AVifExtended, WriterTest,
::testing::Values(
TestParam{AvifWriterOptions{0, 6, false}, ImageInfo{33, 100, 3}, 0,
AvifReaderOptions{false}},
TestParam{AvifWriterOptions{0, 6, false}, ImageInfo{33, 100, 4}, 0,
AvifReaderOptions{false}},
TestParam{AvifWriterOptions{1, 6, false}, ImageInfo{33, 100, 3}, 0.5,
AvifReaderOptions{false}},
TestParam{AvifWriterOptions{1, 6, false}, ImageInfo{33, 100, 4}, 44,
AvifReaderOptions{false}}));
INSTANTIATE_TEST_SUITE_P(
JpegFiles, WriterTest,
::testing::Values(
TestParam{JpegWriterOptions{100}, ImageInfo{33, 100, 1}, 0.5},
TestParam{JpegWriterOptions{100}, ImageInfo{33, 100, 3}, 48}));
INSTANTIATE_TEST_SUITE_P(
PngFiles, WriterTest,
::testing::Values(
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 1}, 0},
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 2}, 0},
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 3}, 0},
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
TiffFiles, WriterTest,
::testing::Values(
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 1}, 0},
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 2}, 0},
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 3}, 0},
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
WebPLossless, WriterTest,
::testing::Values(
TestParam{WebPWriterOptions{true}, ImageInfo{33, 100, 3}, 0},
TestParam{WebPWriterOptions{true}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
WebPLossy, WriterTest,
::testing::Values(
TestParam{WebPWriterOptions{false}, ImageInfo{33, 100, 3}, 47},
TestParam{WebPWriterOptions{false}, ImageInfo{33, 100, 4}, 44}));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_writer.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_writer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
85734204-9025-4511-b9a2-be5a6946b858 | cpp | tensorflow/tensorflow | minimum | tensorflow/lite/experimental/shlo/ops/minimum.cc | tensorflow/lite/delegates/xnnpack/minimum_test.cc | #include "tensorflow/lite/experimental/shlo/ops/minimum.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Minimum {
template <class T>
constexpr auto operator()(const T a, const T b) {
return a < b ? a : b;
}
};
MinimumOp Create(MinimumOp::Attributes) { return {}; }
absl::Status Prepare(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("minimum"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("minimum"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("minimum"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Minimum minimum;
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), minimum, lhs, rhs,
output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
minimum, lhs, rhs, output)
}
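  // The DISPATCH_* macros return from Evaluate, so this error is reached
  // only for tensor types not handled above.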
return absl::FailedPreconditionError(
"stablehlo.minimum: Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
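// Each test below runs BuiltinOperator_MINIMUM through the XNNPACK delegate,
// covering broadcast combinations of the two input shapes as well as static
// (constant) inputs and FP16/INT8 weight variants.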
TEST(Minimum, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
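// The following tests verify that MINIMUM is still handled when a static
// input is stored in an alternative encoding: FP16, per-tensor INT8,
// channel-wise INT8, or sparse weights.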
TEST(Minimum, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
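// MultiThreading repeats the basic element-wise check with a two-thread
// delegate to exercise the XNNPACK threadpool code path.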
TEST(Minimum, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/minimum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/minimum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a440b6ec-0039-4608-bdc9-1f4f65da6810 | cpp | google/quiche | spdy_frame_builder | quiche/http2/core/spdy_frame_builder.cc | quiche/http2/core/spdy_frame_builder_test.cc | #include "quiche/http2/core/spdy_frame_builder.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/strings/string_view.h"
#include "quiche/http2/core/spdy_bitmasks.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/http2/core/zero_copy_output_buffer.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace spdy {
SpdyFrameBuilder::SpdyFrameBuilder(size_t size)
: buffer_(new char[size]), capacity_(size), length_(0), offset_(0) {}
SpdyFrameBuilder::SpdyFrameBuilder(size_t size, ZeroCopyOutputBuffer* output)
: buffer_(output == nullptr ? new char[size] : nullptr),
output_(output),
capacity_(size),
length_(0),
offset_(0) {}
SpdyFrameBuilder::~SpdyFrameBuilder() = default;
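// The builder supports two write paths: without an output it appends into
// its own heap-allocated buffer_; with a ZeroCopyOutputBuffer it writes
// directly into externally owned memory and buffer_ stays null.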
char* SpdyFrameBuilder::GetWritableBuffer(size_t length) {
if (!CanWrite(length)) {
return nullptr;
}
return buffer_.get() + offset_ + length_;
}
char* SpdyFrameBuilder::GetWritableOutput(size_t length,
size_t* actual_length) {
char* dest = nullptr;
int size = 0;
if (!CanWrite(length)) {
return nullptr;
}
output_->Next(&dest, &size);
*actual_length = std::min<size_t>(length, size);
return dest;
}
bool SpdyFrameBuilder::Seek(size_t length) {
if (!CanWrite(length)) {
return false;
}
if (output_ == nullptr) {
length_ += length;
} else {
output_->AdvanceWritePtr(length);
length_ += length;
}
return true;
}
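// The BeginNewFrame* methods write the common frame header: a 24-bit payload
// length, an 8-bit type, an 8-bit flags byte, and a 32-bit stream id
// (kDataFrameMinimumSize bytes in total, as the DCHECKs verify).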
bool SpdyFrameBuilder::BeginNewFrame(SpdyFrameType type, uint8_t flags,
SpdyStreamId stream_id) {
uint8_t raw_frame_type = SerializeFrameType(type);
QUICHE_DCHECK(IsDefinedFrameType(raw_frame_type));
QUICHE_DCHECK_EQ(0u, stream_id & ~kStreamIdMask);
bool success = true;
if (length_ > 0) {
    QUICHE_BUG(spdy_bug_73_1)
        << "SpdyFrameBuilder doesn't have a clean state when BeginNewFrame "
        << "is called. Leftover length_ is " << length_;
offset_ += length_;
length_ = 0;
}
success &= WriteUInt24(capacity_ - offset_ - kFrameHeaderSize);
success &= WriteUInt8(raw_frame_type);
success &= WriteUInt8(flags);
success &= WriteUInt32(stream_id);
QUICHE_DCHECK_EQ(kDataFrameMinimumSize, length_);
return success;
}
bool SpdyFrameBuilder::BeginNewFrame(SpdyFrameType type, uint8_t flags,
SpdyStreamId stream_id, size_t length) {
uint8_t raw_frame_type = SerializeFrameType(type);
QUICHE_DCHECK(IsDefinedFrameType(raw_frame_type));
QUICHE_DCHECK_EQ(0u, stream_id & ~kStreamIdMask);
QUICHE_BUG_IF(spdy_bug_73_2, length > kSpdyMaxFrameSizeLimit)
<< "Frame length " << length << " is longer than frame size limit.";
return BeginNewFrameInternal(raw_frame_type, flags, stream_id, length);
}
bool SpdyFrameBuilder::BeginNewUncheckedFrame(uint8_t raw_frame_type,
uint8_t flags,
SpdyStreamId stream_id,
size_t length) {
return BeginNewFrameInternal(raw_frame_type, flags, stream_id, length);
}
bool SpdyFrameBuilder::BeginNewFrameInternal(uint8_t raw_frame_type,
uint8_t flags,
SpdyStreamId stream_id,
size_t length) {
QUICHE_DCHECK_EQ(length, length & kLengthMask);
bool success = true;
offset_ += length_;
length_ = 0;
success &= WriteUInt24(length);
success &= WriteUInt8(raw_frame_type);
success &= WriteUInt8(flags);
success &= WriteUInt32(stream_id);
QUICHE_DCHECK_EQ(kDataFrameMinimumSize, length_);
return success;
}
bool SpdyFrameBuilder::WriteStringPiece32(const absl::string_view value) {
if (!WriteUInt32(value.size())) {
return false;
}
return WriteBytes(value.data(), value.size());
}
bool SpdyFrameBuilder::WriteBytes(const void* data, uint32_t data_len) {
if (!CanWrite(data_len)) {
return false;
}
if (output_ == nullptr) {
char* dest = GetWritableBuffer(data_len);
memcpy(dest, data, data_len);
Seek(data_len);
} else {
char* dest = nullptr;
size_t size = 0;
size_t total_written = 0;
const char* data_ptr = reinterpret_cast<const char*>(data);
while (data_len > 0) {
dest = GetWritableOutput(data_len, &size);
if (dest == nullptr || size == 0) {
return false;
}
uint32_t to_copy = std::min<uint32_t>(data_len, size);
const char* src = data_ptr + total_written;
memcpy(dest, src, to_copy);
Seek(to_copy);
data_len -= to_copy;
total_written += to_copy;
}
}
return true;
}
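// CanWrite() rejects writes larger than the 24-bit length mask as well as
// writes that would overflow the internal buffer or the external output.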
bool SpdyFrameBuilder::CanWrite(size_t length) const {
if (length > kLengthMask) {
QUICHE_DCHECK(false);
return false;
}
if (output_ == nullptr) {
if (offset_ + length_ + length > capacity_) {
QUICHE_DLOG(FATAL) << "Requested: " << length
<< " capacity: " << capacity_
<< " used: " << offset_ + length_;
return false;
}
} else {
if (length > output_->BytesFree()) {
return false;
}
}
return true;
}
} | #include "quiche/http2/core/spdy_frame_builder.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/strings/string_view.h"
#include "quiche/http2/core/array_output_buffer.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/http2/test_tools/spdy_test_utils.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
namespace test {
class QUICHE_EXPORT SpdyFrameBuilderPeer {
public:
static char* GetWritableBuffer(SpdyFrameBuilder* builder, size_t length) {
return builder->GetWritableBuffer(length);
}
static char* GetWritableOutput(SpdyFrameBuilder* builder,
size_t desired_length, size_t* actual_length) {
return builder->GetWritableOutput(desired_length, actual_length);
}
};
namespace {
const int64_t kSize = 64 * 1024;
char output_buffer[kSize] = "";
}
TEST(SpdyFrameBuilderTest, GetWritableBuffer) {
const size_t kBuilderSize = 10;
SpdyFrameBuilder builder(kBuilderSize);
char* writable_buffer =
SpdyFrameBuilderPeer::GetWritableBuffer(&builder, kBuilderSize);
memset(writable_buffer, ~1, kBuilderSize);
EXPECT_TRUE(builder.Seek(kBuilderSize));
SpdySerializedFrame frame(builder.take());
char expected[kBuilderSize];
memset(expected, ~1, kBuilderSize);
EXPECT_EQ(absl::string_view(expected, kBuilderSize), frame);
}
TEST(SpdyFrameBuilderTest, GetWritableOutput) {
ArrayOutputBuffer output(output_buffer, kSize);
const size_t kBuilderSize = 10;
SpdyFrameBuilder builder(kBuilderSize, &output);
size_t actual_size = 0;
char* writable_buffer = SpdyFrameBuilderPeer::GetWritableOutput(
&builder, kBuilderSize, &actual_size);
memset(writable_buffer, ~1, kBuilderSize);
EXPECT_TRUE(builder.Seek(kBuilderSize));
SpdySerializedFrame frame = MakeSerializedFrame(output.Begin(), kBuilderSize);
char expected[kBuilderSize];
memset(expected, ~1, kBuilderSize);
EXPECT_EQ(absl::string_view(expected, kBuilderSize), frame);
}
TEST(SpdyFrameBuilderTest, GetWritableOutputNegative) {
size_t small_cap = 1;
ArrayOutputBuffer output(output_buffer, small_cap);
const size_t kBuilderSize = 10;
SpdyFrameBuilder builder(kBuilderSize, &output);
size_t actual_size = 0;
char* writable_buffer = SpdyFrameBuilderPeer::GetWritableOutput(
&builder, kBuilderSize, &actual_size);
EXPECT_EQ(0u, actual_size);
EXPECT_EQ(nullptr, writable_buffer);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_frame_builder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_frame_builder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
62e61c22-dfcd-4e95-b89f-f33ebb6697b3 | cpp | tensorflow/tensorflow | remote_profiler_session_manager | third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager.cc | third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager_test.cc | #include "xla/tsl/profiler/rpc/client/remote_profiler_session_manager.h"
#include <cstddef>
#include <memory>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/profiler/rpc/client/profiler_client.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
using tensorflow::ProfileRequest;
using tensorflow::RemoteProfilerSessionManagerOptions;
std::unique_ptr<RemoteProfilerSessionManager>
RemoteProfilerSessionManager::Create(
const RemoteProfilerSessionManagerOptions& options,
const ProfileRequest& request, absl::Status& out_status,
AddressResolver resolver) {
VLOG(1) << "Creating a RemoteProfilerSessionManager.";
auto session_manager = absl::WrapUnique(
new RemoteProfilerSessionManager(options, request, resolver));
out_status = session_manager->Init();
if (!out_status.ok()) {
return nullptr;
}
return session_manager;
}
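// When a resolver is supplied it maps each configured service address to a
// dialable address; by default addresses are used verbatim.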
RemoteProfilerSessionManager::RemoteProfilerSessionManager(
RemoteProfilerSessionManagerOptions options, ProfileRequest request,
AddressResolver resolver)
: options_(options), request_(request) {
if (resolver) {
resolver_ = resolver;
} else {
resolver_ = [](absl::string_view addr) { return std::string(addr); };
}
}
RemoteProfilerSessionManager::~RemoteProfilerSessionManager() {
VLOG(2) << "Destroying RemoteProfilerSessionManager.";
}
absl::Status RemoteProfilerSessionManager::Init() {
mutex_lock lock(mutex_);
VLOG(1) << "SessionManager initializing.";
const absl::Time session_created_ts =
absl::FromUnixNanos(options_.session_creation_timestamp_ns());
const absl::Time deadline =
session_created_ts +
absl::Milliseconds(options_.max_session_duration_ms());
LOG(INFO) << "Deadline set to " << deadline
<< " because max_session_duration_ms was "
<< options_.max_session_duration_ms()
<< " and session_creation_timestamp_ns was "
<< options_.session_creation_timestamp_ns() << " ["
<< session_created_ts << "]";
clients_.reserve(options_.service_addresses_size());
ProfileRequest request = request_;
for (auto& service_address : options_.service_addresses()) {
std::string resolved_service_address = resolver_(service_address);
request.set_host_name(resolved_service_address);
auto client = RemoteProfilerSession::Create(resolved_service_address,
deadline, request);
clients_.push_back(std::move(client));
}
LOG(INFO) << "Issued Profile gRPC to " << clients_.size() << " clients";
return absl::OkStatus();
}
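// Typical call sequence (sketch, mirroring the unit tests):
//   absl::Status status;
//   auto sessions =
//       RemoteProfilerSessionManager::Create(options, request, status);
//   if (status.ok()) {
//     for (const auto& response : sessions->WaitForCompletion()) { ... }
//   }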
std::vector<RemoteProfilerSessionManager::Response>
RemoteProfilerSessionManager::WaitForCompletion() {
mutex_lock lock(mutex_);
std::vector<RemoteProfilerSessionManager::Response> remote_responses(
clients_.size());
  for (size_t idx = 0; idx < clients_.size(); ++idx) {
auto& remote_response = remote_responses[idx];
auto* client = clients_[idx].get();
remote_response.profile_response =
client->WaitForCompletion(remote_response.status);
remote_response.service_address = std::string(client->GetServiceAddress());
}
return remote_responses;
}
}
} | #include "xla/tsl/profiler/rpc/client/remote_profiler_session_manager.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/profiler/rpc/client/profiler_client_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
#include "tsl/profiler/protobuf/profiler_service.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::ProfileRequest;
using tensorflow::RemoteProfilerSessionManagerOptions;
using ::tsl::profiler::test::DurationApproxLess;
using ::tsl::profiler::test::DurationNear;
using ::tsl::profiler::test::StartServer;
using ::tsl::testing::TmpDir;
using Response = tsl::profiler::RemoteProfilerSessionManager::Response;
constexpr double kGracePeriodSeconds = 10.0;
ProfileRequest PopulateProfileRequest(
absl::string_view repository_root, absl::string_view session_id,
absl::string_view host_name,
const RemoteProfilerSessionManagerOptions& options) {
constexpr uint64 kMaxEvents = 1000000;
const absl::string_view kXPlanePb = "xplane.pb";
ProfileRequest request;
request.set_duration_ms(options.profiler_options().duration_ms());
request.set_max_events(kMaxEvents);
request.set_repository_root(repository_root.data(), repository_root.size());
request.set_session_id(session_id.data(), session_id.size());
request.set_host_name(host_name.data(), host_name.size());
request.add_tools(kXPlanePb.data(), kXPlanePb.size());
*request.mutable_opts() = options.profiler_options();
return request;
}
TEST(RemoteProfilerSessionManagerTest, Simple) {
absl::Duration duration = absl::Milliseconds(30);
RemoteProfilerSessionManagerOptions options;
*options.mutable_profiler_options() = tsl::ProfilerSession::DefaultOptions();
options.mutable_profiler_options()->set_duration_ms(
absl::ToInt64Milliseconds(duration));
std::string service_address;
auto server = StartServer(duration, &service_address);
options.add_service_addresses(service_address);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(kGracePeriodSeconds);
absl::Duration max_duration = duration + grace;
options.set_max_session_duration_ms(absl::ToInt64Milliseconds(max_duration));
options.set_session_creation_timestamp_ns(absl::ToUnixNanos(approx_start));
ProfileRequest request =
PopulateProfileRequest(TmpDir(), "session_id", service_address, options);
absl::Status status;
auto sessions =
RemoteProfilerSessionManager::Create(options, request, status);
EXPECT_TRUE(status.ok());
std::vector<Response> responses = sessions->WaitForCompletion();
absl::Duration elapsed = absl::Now() - approx_start;
ASSERT_EQ(responses.size(), 1);
EXPECT_TRUE(responses.back().status.ok());
EXPECT_TRUE(responses.back().profile_response->empty_trace());
EXPECT_EQ(responses.back().profile_response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
TEST(RemoteProfilerSessionManagerTest, ExpiredDeadline) {
absl::Duration duration = absl::Milliseconds(30);
RemoteProfilerSessionManagerOptions options;
*options.mutable_profiler_options() = tsl::ProfilerSession::DefaultOptions();
options.mutable_profiler_options()->set_duration_ms(
absl::ToInt64Milliseconds(duration));
std::string service_address;
auto server = StartServer(duration, &service_address);
options.add_service_addresses(service_address);
absl::Duration grace = absl::Seconds(kGracePeriodSeconds);
absl::Duration max_duration = duration + grace;
options.set_max_session_duration_ms(absl::ToInt64Milliseconds(max_duration));
options.set_session_creation_timestamp_ns(0);
absl::Time approx_start = absl::Now();
ProfileRequest request =
PopulateProfileRequest(TmpDir(), "session_id", service_address, options);
absl::Status status;
auto sessions =
RemoteProfilerSessionManager::Create(options, request, status);
EXPECT_TRUE(status.ok());
std::vector<Response> responses = sessions->WaitForCompletion();
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_THAT(elapsed, DurationNear(absl::Seconds(0)));
ASSERT_EQ(responses.size(), 1);
EXPECT_TRUE(absl::IsDeadlineExceeded(responses.back().status));
EXPECT_TRUE(responses.back().profile_response->empty_trace());
EXPECT_EQ(responses.back().profile_response->tool_data_size(), 0);
}
TEST(RemoteProfilerSessionManagerTest, LongSession) {
absl::Duration duration = absl::Seconds(3);
RemoteProfilerSessionManagerOptions options;
*options.mutable_profiler_options() = tsl::ProfilerSession::DefaultOptions();
options.mutable_profiler_options()->set_duration_ms(
absl::ToInt64Milliseconds(duration));
std::string service_address;
auto server = StartServer(duration, &service_address);
options.add_service_addresses(service_address);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(kGracePeriodSeconds);
absl::Duration max_duration = duration + grace;
options.set_max_session_duration_ms(absl::ToInt64Milliseconds(max_duration));
options.set_session_creation_timestamp_ns(absl::ToUnixNanos(approx_start));
ProfileRequest request =
PopulateProfileRequest(TmpDir(), "session_id", service_address, options);
absl::Status status;
auto sessions =
RemoteProfilerSessionManager::Create(options, request, status);
EXPECT_TRUE(status.ok());
std::vector<Response> responses = sessions->WaitForCompletion();
absl::Duration elapsed = absl::Now() - approx_start;
ASSERT_EQ(responses.size(), 1);
EXPECT_TRUE(responses.back().status.ok());
EXPECT_TRUE(responses.back().profile_response->empty_trace());
EXPECT_EQ(responses.back().profile_response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a805c5c-8d3f-4d0e-b222-6133bf4825b1 | cpp | google/tsl | cpu_info | tsl/platform/cpu_info.cc | tsl/platform/cpu_info_test.cc | #include "tsl/platform/cpu_info.h"
#include "absl/base/call_once.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/types.h"
#if defined(PLATFORM_IS_X86)
#include <mutex>
#endif
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
#include <sys/auxv.h>
#ifndef HWCAP_CPUID
#define HWCAP_CPUID (1 << 11)
#endif
#include <fstream>
#include <sstream>
#endif
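// GETCPUID(a, b, c, d, a_inp, c_inp) executes the CPUID instruction with
// EAX=a_inp and ECX=c_inp, storing the resulting EAX/EBX/ECX/EDX into
// a, b, c, and d.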
#ifdef PLATFORM_IS_X86
#ifdef PLATFORM_WINDOWS
#define GETCPUID(a, b, c, d, a_inp, c_inp) \
{ \
int cpu_info[4] = {-1}; \
__cpuidex(cpu_info, a_inp, c_inp); \
a = cpu_info[0]; \
b = cpu_info[1]; \
c = cpu_info[2]; \
d = cpu_info[3]; \
}
#else
#define GETCPUID(a, b, c, d, a_inp, c_inp) \
asm("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a"(a), "=D"(b), "=c"(c), "=d"(d) \
: "a"(a_inp), "2"(c_inp))
#endif
#endif
namespace tsl {
namespace port {
namespace {
#ifdef PLATFORM_IS_X86
class CPUIDInfo;
void InitCPUIDInfo();
CPUIDInfo *cpuid = nullptr;
#ifdef PLATFORM_WINDOWS
int GetXCR0EAX() { return _xgetbv(0); }
#else
int GetXCR0EAX() {
int eax, edx;
asm("XGETBV" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
}
#endif
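// GetXCR0EAX() reads XCR0 via XGETBV. Its bits report which register state
// the OS saves on context switch; CPUID feature bits only count when the
// matching state is OS-enabled, which gates the AVX/AVX-512 checks below.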
class CPUIDInfo {
public:
CPUIDInfo()
: have_adx_(0),
have_aes_(0),
have_amx_bf16_(0),
have_amx_fp16_(0),
have_amx_int8_(0),
have_amx_tile_(0),
have_avx_(0),
have_avx2_(0),
have_avx512f_(0),
have_avx512cd_(0),
have_avx512er_(0),
have_avx512pf_(0),
have_avx512vl_(0),
have_avx512bw_(0),
have_avx512dq_(0),
have_avx512vbmi_(0),
have_avx512ifma_(0),
have_avx512_4vnniw_(0),
have_avx512_4fmaps_(0),
have_avx512_bf16_(0),
have_avx512_fp16_(0),
have_avx512_vnni_(0),
have_avx_vnni_(0),
have_avx_vnni_int8_(0),
have_avx_ne_convert_(0),
have_bmi1_(0),
have_bmi2_(0),
have_cmov_(0),
have_cmpxchg16b_(0),
have_cmpxchg8b_(0),
have_f16c_(0),
have_fma_(0),
have_mmx_(0),
have_pclmulqdq_(0),
have_popcnt_(0),
have_prefetchw_(0),
have_prefetchwt1_(0),
have_rdrand_(0),
have_rdseed_(0),
have_smap_(0),
have_sse_(0),
have_sse2_(0),
have_sse3_(0),
have_sse4_1_(0),
have_sse4_2_(0),
have_ssse3_(0),
have_hypervisor_(0) {}
static void Initialize() {
CHECK(cpuid == nullptr) << __func__ << " ran more than once";
cpuid = new CPUIDInfo;
uint32 eax, ebx, ecx, edx;
GETCPUID(eax, ebx, ecx, edx, 0, 0);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&ebx), 4);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&edx), 4);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&ecx), 4);
GETCPUID(eax, ebx, ecx, edx, 1, 0);
cpuid->model_num_ = static_cast<int>((eax >> 4) & 0xf);
cpuid->family_ = static_cast<int>((eax >> 8) & 0xf);
cpuid->have_aes_ = (ecx >> 25) & 0x1;
cpuid->have_cmov_ = (edx >> 15) & 0x1;
cpuid->have_cmpxchg16b_ = (ecx >> 13) & 0x1;
cpuid->have_cmpxchg8b_ = (edx >> 8) & 0x1;
cpuid->have_mmx_ = (edx >> 23) & 0x1;
cpuid->have_pclmulqdq_ = (ecx >> 1) & 0x1;
cpuid->have_popcnt_ = (ecx >> 23) & 0x1;
cpuid->have_rdrand_ = (ecx >> 30) & 0x1;
cpuid->have_sse2_ = (edx >> 26) & 0x1;
cpuid->have_sse3_ = ecx & 0x1;
cpuid->have_sse4_1_ = (ecx >> 19) & 0x1;
cpuid->have_sse4_2_ = (ecx >> 20) & 0x1;
cpuid->have_sse_ = (edx >> 25) & 0x1;
cpuid->have_ssse3_ = (ecx >> 9) & 0x1;
cpuid->have_hypervisor_ = (ecx >> 31) & 1;
const uint64 xcr0_xmm_mask = 0x2;
const uint64 xcr0_ymm_mask = 0x4;
const uint64 xcr0_maskreg_mask = 0x20;
const uint64 xcr0_zmm0_15_mask = 0x40;
const uint64 xcr0_zmm16_31_mask = 0x80;
const uint64 xcr0_avx_mask = xcr0_xmm_mask | xcr0_ymm_mask;
const uint64 xcr0_avx512_mask = xcr0_avx_mask | xcr0_maskreg_mask |
xcr0_zmm0_15_mask | xcr0_zmm16_31_mask;
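    // AVX is usable only if the CPU advertises it (ECX bit 28), OSXSAVE is
    // set (ECX bit 27), and the OS saves XMM/YMM state according to XCR0.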
const bool have_avx =
((ecx >> 27) & 0x1) &&
((GetXCR0EAX() & xcr0_avx_mask) == xcr0_avx_mask) &&
((ecx >> 28) & 0x1);
const bool have_avx512 =
((ecx >> 27) & 0x1) &&
((GetXCR0EAX() & xcr0_avx512_mask) == xcr0_avx512_mask);
cpuid->have_avx_ = have_avx;
cpuid->have_fma_ = have_avx && ((ecx >> 12) & 0x1);
cpuid->have_f16c_ = have_avx && ((ecx >> 29) & 0x1);
GETCPUID(eax, ebx, ecx, edx, 7, 0);
const uint32 kMaxNumSubLeaves = eax;
cpuid->have_adx_ = (ebx >> 19) & 0x1;
cpuid->have_avx2_ = have_avx && ((ebx >> 5) & 0x1);
cpuid->have_bmi1_ = (ebx >> 3) & 0x1;
cpuid->have_bmi2_ = (ebx >> 8) & 0x1;
cpuid->have_prefetchwt1_ = ecx & 0x1;
cpuid->have_rdseed_ = (ebx >> 18) & 0x1;
cpuid->have_smap_ = (ebx >> 20) & 0x1;
cpuid->have_avx512f_ = have_avx512 && ((ebx >> 16) & 0x1);
cpuid->have_avx512cd_ = have_avx512 && ((ebx >> 28) & 0x1);
cpuid->have_avx512er_ = have_avx512 && ((ebx >> 27) & 0x1);
cpuid->have_avx512pf_ = have_avx512 && ((ebx >> 26) & 0x1);
cpuid->have_avx512vl_ = have_avx512 && ((ebx >> 31) & 0x1);
cpuid->have_avx512bw_ = have_avx512 && ((ebx >> 30) & 0x1);
cpuid->have_avx512dq_ = have_avx512 && ((ebx >> 17) & 0x1);
cpuid->have_avx512vbmi_ = have_avx512 && ((ecx >> 1) & 0x1);
cpuid->have_avx512ifma_ = have_avx512 && ((ebx >> 21) & 0x1);
cpuid->have_avx512_4vnniw_ = have_avx512 && ((edx >> 2) & 0x1);
cpuid->have_avx512_4fmaps_ = have_avx512 && ((edx >> 3) & 0x1);
cpuid->have_avx512_vnni_ = have_avx512 && ((ecx >> 11) & 0x1);
cpuid->have_amx_tile_ = (edx >> 24) & 0x1;
cpuid->have_amx_int8_ = (edx >> 25) & 0x1;
cpuid->have_amx_bf16_ = (edx >> 22) & 0x1;
cpuid->have_avx512_fp16_ = have_avx512 && ((edx >> 23) & 0x1);
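    // Leaf 7 sub-leaf 1 reports newer extensions (AVX-VNNI, AVX512-BF16,
    // AMX-FP16, ...) and is only valid when leaf 7 advertises at least one
    // sub-leaf.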
if (kMaxNumSubLeaves >= 1) {
GETCPUID(eax, ebx, ecx, edx, 7, 1);
cpuid->have_avx_vnni_ = (eax >> 4) & 0x1;
cpuid->have_avx512_bf16_ = have_avx512 && ((eax >> 5) & 0x1);
cpuid->have_amx_fp16_ = (eax >> 21) & 0x1;
cpuid->have_avx_vnni_int8_ = (edx >> 4) & 0x1;
cpuid->have_avx_ne_convert_ = (edx >> 5) & 0x1;
}
}
static bool TestFeature(CPUFeature feature) {
InitCPUIDInfo();
switch (feature) {
case ADX: return cpuid->have_adx_;
case AES: return cpuid->have_aes_;
case AMX_BF16: return cpuid->have_amx_bf16_;
case AMX_FP16: return cpuid->have_amx_fp16_;
case AMX_INT8: return cpuid->have_amx_int8_;
case AMX_TILE: return cpuid->have_amx_tile_;
case AVX2: return cpuid->have_avx2_;
case AVX: return cpuid->have_avx_;
case AVX512F: return cpuid->have_avx512f_;
case AVX512CD: return cpuid->have_avx512cd_;
case AVX512PF: return cpuid->have_avx512pf_;
case AVX512ER: return cpuid->have_avx512er_;
case AVX512VL: return cpuid->have_avx512vl_;
case AVX512BW: return cpuid->have_avx512bw_;
case AVX512DQ: return cpuid->have_avx512dq_;
case AVX512VBMI: return cpuid->have_avx512vbmi_;
case AVX512IFMA: return cpuid->have_avx512ifma_;
case AVX512_4VNNIW: return cpuid->have_avx512_4vnniw_;
case AVX512_4FMAPS: return cpuid->have_avx512_4fmaps_;
case AVX512_BF16: return cpuid->have_avx512_bf16_;
case AVX512_FP16: return cpuid->have_avx512_fp16_;
case AVX512_VNNI: return cpuid->have_avx512_vnni_;
case AVX_VNNI: return cpuid->have_avx_vnni_;
case AVX_VNNI_INT8: return cpuid->have_avx_vnni_int8_;
case AVX_NE_CONVERT: return cpuid->have_avx_ne_convert_;
case BMI1: return cpuid->have_bmi1_;
case BMI2: return cpuid->have_bmi2_;
case CMOV: return cpuid->have_cmov_;
case CMPXCHG16B: return cpuid->have_cmpxchg16b_;
case CMPXCHG8B: return cpuid->have_cmpxchg8b_;
case F16C: return cpuid->have_f16c_;
case FMA: return cpuid->have_fma_;
case MMX: return cpuid->have_mmx_;
case PCLMULQDQ: return cpuid->have_pclmulqdq_;
case POPCNT: return cpuid->have_popcnt_;
case PREFETCHW: return cpuid->have_prefetchw_;
case PREFETCHWT1: return cpuid->have_prefetchwt1_;
case RDRAND: return cpuid->have_rdrand_;
case RDSEED: return cpuid->have_rdseed_;
case SMAP: return cpuid->have_smap_;
case SSE2: return cpuid->have_sse2_;
case SSE3: return cpuid->have_sse3_;
case SSE4_1: return cpuid->have_sse4_1_;
case SSE4_2: return cpuid->have_sse4_2_;
case SSE: return cpuid->have_sse_;
case SSSE3: return cpuid->have_ssse3_;
case HYPERVISOR: return cpuid->have_hypervisor_;
default:
break;
}
return false;
}
string vendor_str() const { return vendor_str_; }
int family() const { return family_; }
  int model_num() const { return model_num_; }
private:
int have_adx_ : 1;
int have_aes_ : 1;
int have_amx_bf16_ : 1;
int have_amx_fp16_ : 1;
int have_amx_int8_ : 1;
int have_amx_tile_ : 1;
int have_avx_ : 1;
int have_avx2_ : 1;
int have_avx512f_ : 1;
int have_avx512cd_ : 1;
int have_avx512er_ : 1;
int have_avx512pf_ : 1;
int have_avx512vl_ : 1;
int have_avx512bw_ : 1;
int have_avx512dq_ : 1;
int have_avx512vbmi_ : 1;
int have_avx512ifma_ : 1;
int have_avx512_4vnniw_ : 1;
int have_avx512_4fmaps_ : 1;
int have_avx512_bf16_ : 1;
int have_avx512_fp16_ : 1;
int have_avx512_vnni_ : 1;
int have_avx_vnni_ : 1;
int have_avx_vnni_int8_ : 1;
int have_avx_ne_convert_ : 1;
int have_bmi1_ : 1;
int have_bmi2_ : 1;
int have_cmov_ : 1;
int have_cmpxchg16b_ : 1;
int have_cmpxchg8b_ : 1;
int have_f16c_ : 1;
int have_fma_ : 1;
int have_mmx_ : 1;
int have_pclmulqdq_ : 1;
int have_popcnt_ : 1;
int have_prefetchw_ : 1;
int have_prefetchwt1_ : 1;
int have_rdrand_ : 1;
int have_rdseed_ : 1;
int have_smap_ : 1;
int have_sse_ : 1;
int have_sse2_ : 1;
int have_sse3_ : 1;
int have_sse4_1_ : 1;
int have_sse4_2_ : 1;
int have_ssse3_ : 1;
int have_hypervisor_ : 1;
string vendor_str_;
int family_;
int model_num_;
};
absl::once_flag cpuid_once_flag;
void InitCPUIDInfo() {
absl::call_once(cpuid_once_flag, CPUIDInfo::Initialize);
}
#endif
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
class CPUIDInfo;
void InitCPUIDInfo();
CPUIDInfo *cpuid = nullptr;
class CPUIDInfo {
public:
CPUIDInfo()
: implementer_(0),
variant_(0),
cpunum_(0),
is_arm_neoverse_v1_(0),
is_arm_neoverse_n1_(0) {}
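  // On Linux, the core is identified by reading MIDR_EL1 from sysfs for the
  // first CPU listed in /sys/devices/system/cpu/present.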
static void Initialize() {
if (cpuid != nullptr) return;
cpuid = new CPUIDInfo;
if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) {
return;
}
int present_cpu = -1;
#ifndef PLATFORM_WINDOWS
std::ifstream CPUspresent;
CPUspresent.open("/sys/devices/system/cpu/present", std::ios::in);
if (CPUspresent.is_open()) {
std::string line;
if (static_cast<bool>(getline(CPUspresent, line))) {
auto ending = line.end();
for (auto i = line.begin(); i < line.end(); ++i) {
if (*i == '-' || *i == ',') {
ending = i;
break;
}
}
line.erase(ending, line.end());
present_cpu = std::stoi(line);
}
}
#endif
if (present_cpu == -1) {
return;
}
#ifndef PLATFORM_WINDOWS
std::stringstream str;
str << "/sys/devices/system/cpu/cpu" << present_cpu
<< "/regs/identification/midr_el1";
std::ifstream midr_el1_file(str.str(), std::ios::in);
if (midr_el1_file.is_open()) {
std::string line;
if (static_cast<bool>(getline(midr_el1_file, line))) {
uint32 midr_el1 = std::stoul(line, nullptr, 16);
cpuid->implementer_ = (midr_el1 >> 24) & 0xFF;
cpuid->variant_ = (midr_el1 >> 20) & 0xF;
cpuid->cpunum_ = (midr_el1 >> 4) & 0xFFF;
if (cpuid->implementer_ == 0x41) {
switch (cpuid->cpunum_) {
case 0xd40:
cpuid->is_arm_neoverse_v1_ = 1;
break;
case 0xd0c:
cpuid->is_arm_neoverse_n1_ = 1;
break;
default:
break;
}
}
}
}
#endif
}
int implementer() const { return implementer_; }
int cpunum() const { return cpunum_; }
static bool TestAarch64CPU(Aarch64CPU cpu) {
InitCPUIDInfo();
switch (cpu) {
case ARM_NEOVERSE_V1:
return cpuid->is_arm_neoverse_v1_;
default:
        return false;
}
}
private:
int implementer_;
int variant_;
int cpunum_;
int is_arm_neoverse_v1_;
int is_arm_neoverse_n1_;
};
absl::once_flag cpuid_once_flag;
void InitCPUIDInfo() {
absl::call_once(cpuid_once_flag, CPUIDInfo::Initialize);
}
#endif
}
bool TestCPUFeature(CPUFeature feature) {
#ifdef PLATFORM_IS_X86
return CPUIDInfo::TestFeature(feature);
#else
return false;
#endif
}
bool TestAarch64CPU(Aarch64CPU cpu) {
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
return CPUIDInfo::TestAarch64CPU(cpu);
#else
return false;
#endif
}
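// Example use (as in the unit tests):
//   if (port::TestCPUFeature(port::CPUFeature::SSE)) { ... }
//   if (port::TestAarch64CPU(port::Aarch64CPU::ARM_NEOVERSE_V1)) { ... }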
std::string CPUVendorIDString() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->vendor_str();
#else
return "";
#endif
}
int CPUFamily() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->family();
#elif defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
InitCPUIDInfo();
return cpuid->implementer();
#else
return 0;
#endif
}
int CPUModelNum() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->model_num();
#elif defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
InitCPUIDInfo();
return cpuid->cpunum();
#else
return 0;
#endif
}
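// Queries CPUID leaf 11 (extended topology): at the SMT level
// (ECX[15:8] == 1), EAX[4:0] holds the APIC-ID shift, so 1 << shift gives
// the number of logical-processor slots per core.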
int CPUIDNumSMT() {
#ifdef PLATFORM_IS_X86
uint32 eax, ebx, ecx, edx;
GETCPUID(eax, ebx, ecx, edx, 0, 0);
if (eax >= 11) {
GETCPUID(eax, ebx, ecx, edx, 11, 0);
if (ebx != 0 && ((ecx & 0xff00) >> 8) == 1) {
return 1 << (eax & 0x1f);
}
}
#endif
return 0;
}
}
} | #include "tsl/platform/cpu_info.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(CPUInfo, CommonX86CPU) {
if (port::TestCPUFeature(port::CPUFeature::SSE)) {
EXPECT_TRUE(port::IsX86CPU());
}
}
TEST(CPUInfo, Aarch64NeoverseV1CPU) {
if (port::TestAarch64CPU(port::Aarch64CPU::ARM_NEOVERSE_V1)) {
EXPECT_TRUE(port::IsAarch64CPU());
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cpu_info.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cpu_info_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |