ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---
1,800 | cpp | tensorflow/tensorflow | basic_string_array | third_party/xla/xla/python/pjrt_ifrt/basic_string_array.cc | third_party/xla/xla/python/pjrt_ifrt/basic_string_array_test.cc | #ifndef XLA_PYTHON_PJRT_IFRT_BASIC_STRING_ARRAY_H_
#define XLA_PYTHON_PJRT_IFRT_BASIC_STRING_ARRAY_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/inlined_vector.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
namespace xla {
namespace ifrt {
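// Trivial dense, major-to-minor layout for string arrays; it carries no state,
// so all instances compare equal and serialize to an empty string.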
class BasicStringArrayLayout : public PjRtLayout {
public:
BasicStringArrayLayout() = default;
BasicStringArrayLayout(const BasicStringArrayLayout& other) = delete;
~BasicStringArrayLayout() override = default;
std::string Serialize() const override;
std::string ToString() const override;
bool operator==(const PjRtLayout& other) const override;
protected:
void Hash(absl::HashState state) const override;
};
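// An IFRT Array of strings. Each shard's buffer is a span of string_views; the
// caller-provided OnDoneWithBuffer callback keeps the referenced string storage
// alive until the array no longer needs it.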
class BasicStringArray final
: public llvm::RTTIExtends<BasicStringArray, Array> {
public:
using Buffer = absl::Span<const absl::string_view>;
static constexpr int kBuffersInlineSize = 1;
using Buffers = absl::InlinedVector<Buffer, kBuffersInlineSize>;
using OnDoneWithBuffer = std::function<void()>;
static absl::StatusOr<tsl::RCReference<BasicStringArray>> Create(
Client* client, Shape shape, std::shared_ptr<const Sharding> sharding,
Future<Buffers> buffers, OnDoneWithBuffer on_done_with_buffer);
~BasicStringArray() override;
absl::StatusOr<tsl::RCReference<Array>> FullyReplicatedShard(
ArrayCopySemantics semantics) override;
Client* client() const override {
DCHECK(this);
return client_;
}
DType dtype() const override {
DCHECK(this);
return DType(DType::kString);
}
const Shape& shape() const override {
DCHECK(this);
return shape_;
}
const Sharding& sharding() const override {
DCHECK(this);
return *sharding_;
}
std::shared_ptr<const Sharding> shared_ptr_sharding() const override {
DCHECK(this);
return sharding_;
}
absl::StatusOr<std::unique_ptr<PjRtLayout>> layout() const override;
absl::StatusOr<std::vector<tsl::RCReference<Array>>>
DisassembleIntoSingleDeviceArrays(ArrayCopySemantics semantics) override;
ABSL_MUST_USE_RESULT
Future<> CopyToHostBuffer(
void* data, std::optional<absl::Span<const int64_t>> byte_strides,
ArrayCopySemantics semantics) override;
absl::StatusOr<tsl::RCReference<Array>> Copy(
std::optional<xla::ifrt::DeviceList> devices,
std::optional<xla::ifrt::MemoryKind> memory_kind,
ArrayCopySemantics semantics);
Future<> GetReadyFuture() const override;
Future<> Delete() override;
bool IsDeleted() const override;
std::string DebugString() const override;
Future<Buffers> buffers() const {
return buffers_;
}
static char ID;
private:
template <typename T, typename... Args>
friend tsl::RCReference<T> tsl::MakeRef(Args&&... args);
BasicStringArray(Client* client, Shape shape,
std::shared_ptr<const Sharding> sharding,
Future<Buffers> buffers, Future<> ready_future,
OnDoneWithBuffer on_done_with_buffer);
void DeleteInternal() ABSL_LOCKS_EXCLUDED(mu_);
Client* client_;
Shape shape_;
std::shared_ptr<const Sharding> sharding_;
Future<Buffers> buffers_;
Future<> ready_future_;
mutable absl::Mutex mu_;
OnDoneWithBuffer on_done_with_buffer_ ABSL_GUARDED_BY(mu_);
bool is_deleted_ ABSL_GUARDED_BY(mu_) = false;
};
}
}
#endif
#include "xla/python/pjrt_ifrt/basic_string_array.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
std::string BasicStringArrayLayout::Serialize() const {
return std::string();
}
std::string BasicStringArrayLayout::ToString() const {
return "BasicStringArrayLayout: Dense, major-to-minor.";
}
bool BasicStringArrayLayout::operator==(const PjRtLayout& other) const {
auto* other_basic_string_array_layout =
dynamic_cast<const xla::ifrt::BasicStringArrayLayout*>(&other);
if (other_basic_string_array_layout == nullptr) {
return false;
}
return true;
}
void BasicStringArrayLayout::Hash(absl::HashState state) const {
}
char BasicStringArray::ID = 0;
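// Creates a BasicStringArray. The incoming buffers future is validated when it
// becomes ready: the number of per-shard buffers must match the number of
// devices in the sharding, otherwise both the buffers and ready futures are set
// to an error.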
absl::StatusOr<tsl::RCReference<BasicStringArray>> BasicStringArray::Create(
Client* client, Shape shape, std::shared_ptr<const Sharding> sharding,
Future<Buffers> buffers, OnDoneWithBuffer on_done_with_buffer) {
if (!buffers.IsValid()) {
return absl::InvalidArgumentError("Got buffers_ future is invalid");
}
auto buffers_promise = Future<Buffers>::CreatePromise();
auto buffers_future = Future<Buffers>(buffers_promise);
auto ready_promise = Future<>::CreatePromise();
auto ready_future = Future<>(ready_promise);
auto buffer_validator =
[buffers_promise = std::move(buffers_promise),
ready_promise = std::move(ready_promise),
sharding = sharding](absl::StatusOr<Buffers> buffers) mutable {
if (!buffers.ok()) {
buffers_promise.Set(buffers.status());
ready_promise.Set(buffers.status());
return;
}
if (sharding->devices().size() != (*buffers).size()) {
auto error = absl::FailedPreconditionError(absl::StrCat(
"Number of buffers: ", (*buffers).size(),
" does not match the number of devices in sharding: ",
sharding->devices().size()));
buffers_promise.Set(error);
ready_promise.Set(error);
return;
}
buffers_promise.Set(std::move(buffers));
ready_promise.Set(absl::OkStatus());
};
buffers.OnReady(std::move(buffer_validator));
return tsl::MakeRef<BasicStringArray>(
client, std::move(shape), std::move(sharding), std::move(buffers_future),
std::move(ready_future), std::move(on_done_with_buffer));
}
BasicStringArray::BasicStringArray(Client* client, Shape shape,
std::shared_ptr<const Sharding> sharding,
Future<Buffers> buffers,
Future<> ready_future,
OnDoneWithBuffer on_done_with_buffer)
: client_(client),
shape_(std::move(shape)),
sharding_(std::move(sharding)),
buffers_(std::move(buffers)),
ready_future_(std::move(ready_future)),
on_done_with_buffer_(std::move(on_done_with_buffer)) {}
BasicStringArray::~BasicStringArray() { DeleteInternal(); }
Future<> BasicStringArray::Delete() {
DeleteInternal();
return Future<>(absl::OkStatus());
}
bool BasicStringArray::IsDeleted() const {
absl::MutexLock lock(&mu_);
return is_deleted_;
}
void BasicStringArray::DeleteInternal() {
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return;
}
if (on_done_with_buffer_) {
std::move(on_done_with_buffer_)();
}
is_deleted_ = true;
}
Future<> BasicStringArray::GetReadyFuture() const {
DCHECK(this);
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return Future<>(
absl::FailedPreconditionError("Array has already been deleted"));
}
return ready_future_;
}
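// Splits the array into one single-device array per shard. Each shard's strings
// are deep-copied into a per-shard backing store kept alive by the new array's
// OnDoneWithBuffer callback.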
absl::StatusOr<std::vector<tsl::RCReference<Array>>>
BasicStringArray::DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics semantics) {
DCHECK(this);
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
int num_shards = sharding_->devices().size();
std::vector<Promise<Buffers>> buffer_promises;
buffer_promises.reserve(num_shards);
std::vector<Future<Buffers>> buffer_futures;
buffer_futures.reserve(num_shards);
struct PerShardBufferBackingStore {
void CopyFrom(absl::Span<const absl::string_view> input_buffer) {
strings.reserve(input_buffer.size());
string_views.reserve(input_buffer.size());
for (absl::string_view buf : input_buffer) {
strings.push_back(std::string(buf.data(), buf.size()));
string_views.push_back(strings.back());
}
}
std::vector<std::string> strings;
std::vector<absl::string_view> string_views;
};
std::vector<std::shared_ptr<PerShardBufferBackingStore>>
per_shard_buffer_backing_stores;
per_shard_buffer_backing_stores.reserve(num_shards);
std::vector<OnDoneWithBuffer> on_done_with_buffer_callbacks;
on_done_with_buffer_callbacks.reserve(num_shards);
for (int i = 0; i < num_shards; ++i) {
buffer_promises.push_back(Future<Buffers>::CreatePromise());
buffer_futures.push_back(Future<Buffers>(buffer_promises.back()));
auto backing_store = std::make_shared<PerShardBufferBackingStore>();
per_shard_buffer_backing_stores.push_back(backing_store);
on_done_with_buffer_callbacks.push_back(
[backing_store = std::move(backing_store)]() {});
}
buffers_.OnReady([buffer_promises = std::move(buffer_promises),
per_shard_buffer_backing_stores =
std::move(per_shard_buffer_backing_stores)](
absl::StatusOr<Buffers> buffers) mutable {
if (!buffers.ok()) {
for (auto& promise : buffer_promises) {
promise.Set(buffers.status());
}
per_shard_buffer_backing_stores.clear();
return;
}
auto num_shards = buffers->size();
for (int i = 0; i < num_shards; ++i) {
per_shard_buffer_backing_stores[i]->CopyFrom((*buffers)[i]);
Buffers buffers;
buffers.push_back(per_shard_buffer_backing_stores[i]->string_views);
buffer_promises[i].Set(std::move(buffers));
}
});
TF_ASSIGN_OR_RETURN(auto shapes_and_shadings, sharding_->Disassemble(shape_));
std::vector<tsl::RCReference<Array>> arrays;
arrays.reserve(num_shards);
for (int i = 0; i < num_shards; ++i) {
TF_ASSIGN_OR_RETURN(auto array,
BasicStringArray::Create(
client_, std::move(shapes_and_shadings[i].first),
std::move(shapes_and_shadings[i].second),
std::move(buffer_futures[i]),
std::move(on_done_with_buffer_callbacks[i])));
arrays.push_back(array);
}
return arrays;
}
Future<> BasicStringArray::CopyToHostBuffer(
void* data, std::optional<absl::Span<const int64_t>> byte_strides,
ArrayCopySemantics semantics) {
DCHECK(this);
return Future<>(absl::UnimplementedError("Not implemented"));
}
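// Copies the array onto a new device assignment with the same number of
// devices; the string data is deep-copied into a shared backing store once the
// source buffers become ready.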
absl::StatusOr<tsl::RCReference<Array>> BasicStringArray::Copy(
std::optional<xla::ifrt::DeviceList> devices,
std::optional<xla::ifrt::MemoryKind> memory_kind,
ArrayCopySemantics semantics) {
DCHECK(this);
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
TF_ASSIGN_OR_RETURN(auto new_sharding,
sharding().WithDeviceAssignment(devices, memory_kind));
if (new_sharding->devices().size() != sharding_->devices().size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Number of devices in new sharding: ", new_sharding->devices().size(),
" does not match the number of devices in the current sharding: ",
sharding_->devices().size()));
}
struct BufferBackingStore {
void AddShardData(absl::Span<const absl::string_view> input_buffer) {
auto& shard_strings = strings.emplace_back();
shard_strings.reserve(input_buffer.size());
auto& shard_string_views = string_views.emplace_back();
shard_string_views.reserve(input_buffer.size());
for (absl::string_view buf : input_buffer) {
shard_strings.push_back(std::string(buf.data(), buf.size()));
shard_string_views.push_back(shard_strings.back());
}
}
std::vector<std::vector<std::string>> strings;
std::vector<std::vector<absl::string_view>> string_views;
};
auto backing_store = std::make_shared<BufferBackingStore>();
auto on_done_with_buffer = [backing_store]() {};
auto buffers_promise = Future<Buffers>::CreatePromise();
auto buffers_future = Future<Buffers>(buffers_promise);
auto copier = [backing_store = std::move(backing_store),
buffers_promise = std::move(buffers_promise)](
absl::StatusOr<Buffers> input_buffers) mutable {
if (!input_buffers.ok()) {
buffers_promise.Set(input_buffers.status());
return;
}
Buffers buffers;
buffers.reserve(input_buffers->size());
for (auto& input_buffer : *input_buffers) {
backing_store->AddShardData(input_buffer);
buffers.push_back(backing_store->string_views.back());
}
buffers_promise.Set(std::move(buffers));
};
buffers_.OnReady(std::move(copier));
return BasicStringArray::Create(client_, shape_, std::move(new_sharding),
std::move(buffers_future),
std::move(on_done_with_buffer));
}
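// Returns a single-device copy of shard 0. Only valid for fully replicated
// arrays.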
absl::StatusOr<tsl::RCReference<Array>> BasicStringArray::FullyReplicatedShard(
ArrayCopySemantics semantics) {
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
if (!sharding_->IsFullyReplicated()) {
return absl::FailedPreconditionError("This array is not fully replicated");
}
struct BufferBackingStore {
void CopyFrom(absl::Span<const absl::string_view> input_buffer) {
strings.reserve(input_buffer.size());
string_views.reserve(input_buffer.size());
for (absl::string_view buf : input_buffer) {
strings.push_back(std::string(buf.data(), buf.size()));
string_views.push_back(strings.back());
}
}
std::vector<std::string> strings;
std::vector<absl::string_view> string_views;
};
auto backing_store = std::make_shared<BufferBackingStore>();
auto on_done_with_buffer = [backing_store]() {};
auto buffers_promise = Future<Buffers>::CreatePromise();
auto buffers_future = Future<Buffers>(buffers_promise);
auto copier = [backing_store = std::move(backing_store),
buffers_promise = std::move(buffers_promise)](
absl::StatusOr<Buffers> input_buffers) mutable {
if (!input_buffers.ok()) {
buffers_promise.Set(input_buffers.status());
return;
}
auto& input_buffer = (*input_buffers)[0];
backing_store->CopyFrom(input_buffer);
Buffers buffers;
buffers.push_back(backing_store->string_views);
buffers_promise.Set(std::move(buffers));
};
buffers_.OnReady(std::move(copier));
return BasicStringArray::Create(
client_, shape_,
SingleDeviceSharding::Create(sharding_->devices().at(0), MemoryKind()),
std::move(buffers_future), std::move(on_done_with_buffer));
}
absl::StatusOr<std::unique_ptr<PjRtLayout>> BasicStringArray::layout() const {
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
return std::make_unique<BasicStringArrayLayout>();
}
std::string BasicStringArray::DebugString() const {
DCHECK(this);
return absl::StrFormat(
"BasicStringArray(shape=%s; sharding=%s; layout=major-to-minor-dense)",
shape_.DebugString(), sharding_->DebugString());
}
}
} | #include "xla/python/pjrt_ifrt/basic_string_array.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
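// Creates a single-device BasicStringArray of shape {1} on the client's first
// addressable device, backed by the given buffers future.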
absl::StatusOr<tsl::RCReference<BasicStringArray>> CreateTestArray(
Client* client, Future<BasicStringArray::Buffers> buffers,
BasicStringArray::OnDoneWithBuffer on_done_with_buffer) {
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
return BasicStringArray::Create(client, shape, sharding, std::move(buffers),
std::move(on_done_with_buffer));
}
std::pair<BasicStringArray::Buffers, BasicStringArray::OnDoneWithBuffer>
MakeBuffersAndOnDoneWithBuffer(
absl::Span<const absl::string_view> input_strings) {
BasicStringArray::Buffers buffers;
auto string_holder = std::make_shared<std::vector<std::string>>();
string_holder->reserve(input_strings.size());
auto string_view_holder = std::make_shared<std::vector<absl::string_view>>();
string_view_holder->reserve(input_strings.size());
for (const auto str : input_strings) {
string_holder->push_back(std::string(str));
}
for (const auto& str : *string_holder) {
string_view_holder->push_back(absl::string_view(str));
}
buffers.push_back(*string_view_holder);
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[string_holder = std::move(string_holder),
string_view_holder = std::move(string_view_holder)]() {};
return std::make_pair(std::move(buffers), std::move(on_done_with_buffer));
}
absl::StatusOr<std::pair<tsl::RCReference<BasicStringArray>,
Promise<BasicStringArray::Buffers>>>
CreateNonReadyTestArray(
Client* client, Device* const device,
BasicStringArray::OnDoneWithBuffer on_done_with_buffer) {
auto buffers_promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(buffers_promise);
Shape shape({1});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
TF_ASSIGN_OR_RETURN(auto array,
BasicStringArray::Create(client, shape, sharding,
std::move(buffers_future),
std::move(on_done_with_buffer)));
return std::make_pair(std::move(array), std::move(buffers_promise));
}
TEST(BasicStringArrayLayoutTest, Serialize) {
BasicStringArrayLayout layout;
EXPECT_TRUE(layout.Serialize().empty());
}
TEST(BasicStringArrayLayoutTest, ToString) {
BasicStringArrayLayout layout;
auto output_str = layout.ToString();
EXPECT_THAT(output_str, HasSubstr("major-to-minor"));
}
TEST(BasicStringArrayLayoutTest, Equality) {
BasicStringArrayLayout layout_1;
BasicStringArrayLayout layout_2;
const PjRtLayout& layout_3 = layout_2;
EXPECT_EQ(layout_1, layout_3);
xla::PjRtXlaLayout layout_6((xla::Layout()));
const PjRtLayout& layout_7 = layout_6;
EXPECT_FALSE(layout_7 == layout_1);
}
TEST(BasicStringArrayTest, CreateSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
TF_EXPECT_OK(CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(buffers),
nullptr));
}
TEST(BasicStringArrayTest, CreateFailureWithInvalidFuture) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
EXPECT_THAT(CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(),
nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(BasicStringArrayTest, Destruction) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
absl::Notification on_done_with_buffer_called;
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[&on_done_with_buffer_called]() { on_done_with_buffer_called.Notify(); };
auto array_creation_status_promise = PjRtFuture<>::CreatePromise();
tsl::Env::Default()->SchedClosure(([&]() {
auto array = CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer));
array_creation_status_promise.Set(array.status());
}));
TF_ASSERT_OK(Future<>(array_creation_status_promise).Await());
on_done_with_buffer_called.WaitForNotification();
}
TEST(BasicStringArrayTest, InvalidBuffersAreHandledCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 1);
auto shard0_data = std::make_shared<std::vector<absl::string_view>>();
shard0_data->push_back("abc");
auto shard1_data = std::make_shared<std::vector<absl::string_view>>();
shard1_data->push_back("def");
BasicStringArray::Buffers buffers;
buffers.push_back(*shard0_data);
buffers.push_back(*shard1_data);
auto on_done_with_buffer = [shard0_data = std::move(shard0_data),
shard1_data = std::move(shard1_data)]() {};
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer)));
auto array = ret.first;
auto promise = ret.second;
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
tsl::Env::Default()->SchedClosure([&]() { promise.Set(buffers); });
EXPECT_THAT(basic_string_array->GetReadyFuture().Await(),
StatusIs(absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(basic_string_array->buffers().Await(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(BasicStringArrayTest, Delete) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
absl::Notification on_done_with_buffer_called;
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[&on_done_with_buffer_called]() { on_done_with_buffer_called.Notify(); };
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
tsl::Env::Default()->SchedClosure([&]() { array->Delete(); });
on_done_with_buffer_called.WaitForNotification();
EXPECT_TRUE(array->IsDeleted());
}
TEST(GetReadyFutureTest, SuccessCase) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(promise);
TF_ASSERT_OK_AND_ASSIGN(auto array,
CreateTestArray(client.get(), buffers_future,
nullptr));
auto ready_future = array->GetReadyFuture();
EXPECT_FALSE(ready_future.IsKnownReady());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
tsl::Env::Default()->SchedClosure([&]() { promise.Set(buffers); });
TF_EXPECT_OK(ready_future.Await());
}
TEST(GetReadyFutureTest, FailureCases) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(promise);
TF_ASSERT_OK_AND_ASSIGN(auto array,
CreateTestArray(client.get(), buffers_future,
nullptr));
auto ready_future = array->GetReadyFuture();
EXPECT_FALSE(ready_future.IsKnownReady());
tsl::Env::Default()->SchedClosure(
[&]() { promise.Set(absl::InternalError("injected error")); });
EXPECT_THAT(ready_future.Await(), StatusIs(absl::StatusCode::kInternal));
}
TEST(MakeArrayFromHostBufferTest, SuccessCase) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->push_back("abc");
string_views->push_back("def");
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
TF_ASSERT_OK(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, std::move(sharding),
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
std::move(on_done_with_host_buffer)));
}
TEST(MakeArrayFromHostBufferTest, FailureCases) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> single_device_sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->push_back("abc");
string_views->push_back("def");
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
EXPECT_THAT(
client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::optional<absl::Span<const int64_t>>({8}),
single_device_sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
std::shared_ptr<const Sharding> opaque_sharding =
OpaqueSharding::Create(DeviceList({device}), MemoryKind());
EXPECT_THAT(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, opaque_sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
for (Client::HostBufferSemantics host_buffer_semantics :
{Client::HostBufferSemantics::kImmutableUntilTransferCompletes,
Client::HostBufferSemantics::kImmutableZeroCopy,
Client::HostBufferSemantics::kMutableZeroCopy}) {
SCOPED_TRACE(
absl::StrCat("host_buffer_semantics: ", host_buffer_semantics));
EXPECT_THAT(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, single_device_sharding,
host_buffer_semantics, on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
absl::StatusOr<tsl::RCReference<Array>> MakeSingleDeviceStringTestArray(
absl::Span<const std::string> contents, Client* client,
Device* const device) {
Shape shape({1});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
for (const auto& content : contents) {
string_views->push_back(content);
}
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
return client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, std::move(sharding),
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
std::move(on_done_with_host_buffer));
}
absl::StatusOr<tsl::RCReference<Array>> MakeSingleDeviceFloatTestArray(
Client* client, Device* const device) {
DType dtype(DType::kF32);
Shape shape({2, 3});
auto data = std::make_unique<std::vector<float>>(6);
std::iota(data->begin(), data->end(), 0);
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
return client->MakeArrayFromHostBuffer(
data->data(), dtype, shape,
std::nullopt, sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr);
}
absl::StatusOr<tsl::RCReference<Array>> MakeShardedStringTestArray(
Client* client, absl::Span<const std::string> data,
bool is_fully_replicated) {
if (data.size() < 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Input data has too few strings. Need at least 2. got: ", data.size()));
}
auto devices = client->addressable_devices();
if (devices.size() < 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Test client has too few devices. Need 2, got:", devices.size()));
}
std::shared_ptr<const Sharding> sharding = ConcreteEvenSharding::Create(
DeviceList({devices[0], devices[1]}), MemoryKind(), Shape({2, 1}),
Shape({1}), is_fully_replicated);
std::vector<tsl::RCReference<Array>> arrays;
for (int i = 0; i < 2; ++i) {
TF_ASSIGN_OR_RETURN(auto array, MakeSingleDeviceStringTestArray(
{data[i]}, client, devices[i]));
arrays.push_back(std::move(array));
}
return client->AssembleArrayFromSingleDeviceArrays(
Shape({2, 1}), std::move(sharding), absl::MakeSpan(arrays),
ArrayCopySemantics::kAlwaysCopy);
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
SuccessWithReadySingleDeviceArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"shard 0", "shard 1"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto buffers, basic_string_array->buffers().Await());
EXPECT_EQ(buffers.size(), 2);
for (int i = 0; i < buffers.size(); ++i) {
SCOPED_TRACE(absl::StrCat("buffer #", i));
auto buffer = buffers[i];
EXPECT_THAT(buffer, testing::ElementsAre(per_shard_contents[i]));
}
}
TEST(AssembleArrayFromSingleDeviceArraysTest, FailsWithNonStringArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
DeviceList({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays(2);
TF_ASSERT_OK_AND_ASSIGN(
arrays[0], MakeSingleDeviceFloatTestArray(client.get(), devices[0]));
TF_ASSERT_OK_AND_ASSIGN(
arrays[1], MakeSingleDeviceStringTestArray({"string_array_contents"},
client.get(), devices[1]));
EXPECT_THAT(client->AssembleArrayFromSingleDeviceArrays(
Shape({2}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FailsWithNonSingleDeviceStringArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
DeviceList({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays(2);
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
arrays[0], MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(
arrays[1], MakeSingleDeviceStringTestArray({"string_array_contents"},
client.get(), devices[1]));
EXPECT_THAT(client->AssembleArrayFromSingleDeviceArrays(
Shape({2}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FromNonReadySingleDeviceArraysSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
DeviceList({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays;
std::vector<Promise<BasicStringArray::Buffers>> promises;
arrays.reserve(2);
auto buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"abc"});
auto buffers0 = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer0 = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer0)));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"def"});
auto buffers1 = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer1 = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
ret, CreateNonReadyTestArray(client.get(), devices[1],
std::move(on_done_with_buffer1)));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
auto array, client->AssembleArrayFromSingleDeviceArrays(
Shape({1}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy));
tsl::Env::Default()->SchedClosure(([&]() mutable {
promises[0].Set(buffers0);
promises[1].Set(buffers1);
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
TF_ASSERT_OK_AND_ASSIGN(auto buffers, buffers_future.Await());
EXPECT_EQ(buffers.size(), 2);
EXPECT_THAT(buffers[0], testing::ElementsAre("abc"));
EXPECT_THAT(buffers[1], testing::ElementsAre("def"));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FromNonReadySingleDeviceArraysFailure) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
DeviceList({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays;
std::vector<Promise<BasicStringArray::Buffers>> promises;
arrays.reserve(2);
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
nullptr));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
ret, CreateNonReadyTestArray(client.get(), devices[1],
nullptr));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
auto array, client->AssembleArrayFromSingleDeviceArrays(
Shape({1}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy));
absl::Notification done_readying_single_device_arrays;
tsl::Env::Default()->SchedClosure(([&]() mutable {
promises[0].Set(absl::InternalError("injected from the test"));
promises[1].Set(absl::InternalError("injected from the test"));
done_readying_single_device_arrays.Notify();
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
EXPECT_THAT(buffers_future.Await(),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("injected from the test")));
done_readying_single_device_arrays.WaitForNotification();
}
TEST(DisassembleArrayIntoSingleDeviceArrays,
SingleDeviceArrayDisassembleSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
array->DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 1);
auto basic_string_array =
llvm::dyn_cast<BasicStringArray>(disassembled_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
basic_string_array->buffers().Await());
ASSERT_EQ(buffers.size(), 1);
EXPECT_THAT(buffers[0], testing::ElementsAre("abc"));
}
TEST(DisassembleArrayIntoSingleDeviceArrays, ShardedArrayDisassembleSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
array->DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 2);
for (int i = 0; i < disassembled_arrays.size(); ++i) {
SCOPED_TRACE(absl::StrCat("dissembled array: ", i));
auto basic_string_array =
llvm::dyn_cast<BasicStringArray>(disassembled_arrays[i].get());
TF_ASSERT_OK_AND_ASSIGN(auto buffer, basic_string_array->buffers().Await());
ASSERT_EQ(buffer.size(), 1);
EXPECT_THAT(buffer[0], testing::ElementsAre(per_shard_contents[i]));
}
}
TEST(DisassembleArrayIntoSingleDeviceArrays, FailsIfTheArrayHasBeenDeleted) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
array->Delete();
EXPECT_THAT(
array->DisassembleIntoSingleDeviceArrays(ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(CopyTest, SuccessSingleDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(
auto new_arrays,
client->CopyArrays(absl::MakeSpan(arrays), DeviceList({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy));
auto new_basic_string_array =
llvm::dyn_cast<BasicStringArray>(new_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
new_basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 1);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("abc"));
}
TEST(CopyTest, SuccessMultiDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 4);
const std::vector<std::string> per_shard_contents({"shard 0", "shard 1"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(
auto new_arrays,
client->CopyArrays(absl::MakeSpan(arrays),
DeviceList({devices[2], devices[3]}), MemoryKind(),
ArrayCopySemantics::kAlwaysCopy));
auto new_basic_string_array =
llvm::dyn_cast<BasicStringArray>(new_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
new_basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 2);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("shard 0"));
EXPECT_THAT(new_buffers[1], testing::ElementsAre("shard 1"));
}
TEST(CopyTest, FailsAfterDeletion) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
arrays[0]->Delete();
EXPECT_THAT(
client->CopyArrays(absl::MakeSpan(arrays), DeviceList({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(CopyTest, FailsWithDifferentNumbersDevices) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
EXPECT_THAT(client->CopyArrays(absl::MakeSpan(arrays),
DeviceList({devices[0], devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(CopyTest, NonReadySourceArraySuccessfullyBecomesReadyAfterCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto buf_and_on_done_with_buff |
1,801 | cpp | tensorflow/tensorflow | pjrt_attribute_map_util | third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util.cc | third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util_test.cc | #ifndef XLA_PYTHON_PJRT_IFRT_PJRT_ATTRIBUTE_MAP_UTIL_H_
#define XLA_PYTHON_PJRT_IFRT_PJRT_ATTRIBUTE_MAP_UTIL_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/attribute_map.h"
namespace xla {
namespace ifrt {
AttributeMap FromPjRtDeviceAttributeMap(
absl::flat_hash_map<std::string, xla::PjRtValueType> attributes);
absl::flat_hash_map<std::string, xla::PjRtValueType> ToPjRtDeviceAttributeMap(
AttributeMap attributes);
}
}
#endif
#include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/attribute_map.h"
namespace xla {
namespace ifrt {
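// Converts a PjRt device attribute map (variant-valued PjRtValueType) into an
// IFRT AttributeMap.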
AttributeMap FromPjRtDeviceAttributeMap(
absl::flat_hash_map<std::string, xla::PjRtValueType> attributes) {
AttributeMap::Map result;
result.reserve(attributes.size());
for (auto& item : attributes) {
std::visit(
[&](auto& value) {
using T = std::decay_t<decltype(value)>;
const auto& key = item.first;
if constexpr (std::is_same_v<T, std::string>) {
result.insert({key, AttributeMap::StringValue(std::move(value))});
} else if constexpr (std::is_same_v<T, bool>) {
result.insert({key, AttributeMap::BoolValue(value)});
} else if constexpr (std::is_same_v<T, int64_t>) {
result.insert({key, AttributeMap::Int64Value(value)});
} else if constexpr (std::is_same_v<T, std::vector<int64_t>>) {
result.insert(
{key, AttributeMap::Int64ListValue(std::move(value))});
} else if constexpr (std::is_same_v<T, float>) {
result.insert({key, AttributeMap::FloatValue(value)});
}
},
item.second);
}
return AttributeMap(std::move(result));
}
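// Converts an IFRT AttributeMap back into a PjRt device attribute map.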
absl::flat_hash_map<std::string, xla::PjRtValueType> ToPjRtDeviceAttributeMap(
AttributeMap attributes) {
absl::flat_hash_map<std::string, xla::PjRtValueType> result;
result.reserve(attributes.map().size());
for (auto& item : attributes.map()) {
std::visit(
[&](auto& value) {
using T = std::decay_t<decltype(value)>;
const auto& key = item.first;
if constexpr (std::is_same_v<T, AttributeMap::StringValue>) {
result.insert({key, std::move(value.value)});
} else if constexpr (std::is_same_v<T, AttributeMap::BoolValue>) {
result.insert({key, value.value});
} else if constexpr (std::is_same_v<T, AttributeMap::Int64Value>) {
result.insert({key, value.value});
} else if constexpr (std::is_same_v<T,
AttributeMap::Int64ListValue>) {
result.insert({key, std::move(value.value)});
} else if constexpr (std::is_same_v<T, AttributeMap::FloatValue>) {
result.insert({key, value.value});
}
},
item.second);
}
return result;
}
}
} | #include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/attribute_map.h"
namespace xla {
namespace ifrt {
namespace {
TEST(PjRtAttributeMapUtilTest, FromPjRtDeviceAttributeMap) {
absl::flat_hash_map<std::string, PjRtValueType> pjrt_map({
{"string", xla::PjRtValueType(std::string("value"))},
{"bool", xla::PjRtValueType(true)},
{"int64", xla::PjRtValueType(int64_t{123})},
{"int64_list",
xla::PjRtValueType(std::vector<int64_t>({int64_t{1}, int64_t{2}}))},
{"float", xla::PjRtValueType(1.23f)},
});
EXPECT_EQ(FromPjRtDeviceAttributeMap(pjrt_map).map(),
AttributeMap::Map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list",
AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
}));
}
TEST(PjRtAttributeMapUtilTest, ToPjRtDeviceAttributeMap) {
AttributeMap map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list", AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
});
EXPECT_EQ(
ToPjRtDeviceAttributeMap(map),
(absl::flat_hash_map<std::string, xla::PjRtValueType>({
{"string", xla::PjRtValueType(std::string("value"))},
{"bool", xla::PjRtValueType(true)},
{"int64", xla::PjRtValueType(int64_t{123})},
{"int64_list",
xla::PjRtValueType(std::vector<int64_t>({int64_t{1}, int64_t{2}}))},
{"float", xla::PjRtValueType(1.23f)},
})));
}
}
}
} |
1,802 | cpp | tensorflow/tensorflow | pjrt_client | third_party/xla/xla/pjrt/pjrt_client.cc | third_party/xla/xla/pjrt/pjrt_client_test.cc | #ifndef XLA_PJRT_PJRT_CLIENT_H_
#define XLA_PJRT_PJRT_CLIENT_H_
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinOps.h"
#include "xla/client/xla_computation.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
enum PjRtRuntimeType { kStreamExecutor, kTfrt };
inline constexpr absl::string_view PjRtRuntimeTypeString(PjRtRuntimeType type) {
switch (type) {
case kStreamExecutor:
return "stream_executor";
case kTfrt:
return "tfrt";
}
}
class PjRtClient;
class PjRtDevice;
class PjRtMemorySpace {
public:
virtual ~PjRtMemorySpace() = default;
virtual PjRtClient* client() const = 0;
virtual absl::Span<PjRtDevice* const> devices() const = 0;
virtual int id() const = 0;
virtual absl::string_view kind() const = 0;
virtual int kind_id() const = 0;
virtual absl::string_view DebugString() const = 0;
virtual absl::string_view ToString() const = 0;
};
class PjRtDevice {
public:
virtual ~PjRtDevice() = default;
virtual PjRtClient* client() const = 0;
virtual bool IsAddressable() const = 0;
virtual const PjRtDeviceDescription& description() const {
LOG(FATAL) << "PjRtDeviceDescription not available (must override "
"PjRtDevice::description).";
}
ABSL_DEPRECATED("Use global_device_id() instead")
virtual int id() const { return global_device_id().value(); }
virtual PjRtGlobalDeviceId global_device_id() const {
return PjRtGlobalDeviceId(description().id());
}
virtual PjRtLocalDeviceId local_device_id() const {
return PjRtLocalDeviceId(local_hardware_id_typed().value());
}
virtual PjRtLocalHardwareId local_hardware_id_typed() const = 0;
virtual int process_index() const { return description().process_index(); }
ABSL_DEPRECATED("Use local_hardware_id_typed() instead")
virtual int local_hardware_id() const {
return local_hardware_id_typed().value();
}
virtual absl::string_view device_kind() const {
return description().device_kind();
}
virtual absl::string_view DebugString() const {
return description().DebugString();
}
virtual absl::string_view ToString() const {
return description().ToString();
}
virtual const absl::flat_hash_map<std::string, PjRtDeviceAttribute>&
Attributes() const {
return description().Attributes();
}
virtual std::unique_ptr<ScopedAsyncTrackingEvent> CreateAsyncTrackingEvent(
absl::string_view description) const = 0;
virtual absl::Status TransferToInfeed(const LiteralSlice& literal) = 0;
virtual absl::Status TransferFromOutfeed(MutableBorrowingLiteral literal) = 0;
virtual absl::StatusOr<tsl::AllocatorStats> GetAllocatorStats() const {
return Unimplemented("GetAllocatorStats is not supported");
}
virtual absl::Span<PjRtMemorySpace* const> memory_spaces() const = 0;
virtual absl::StatusOr<PjRtMemorySpace*> default_memory_space() const = 0;
virtual absl::StatusOr<PjRtMemorySpace*> memory_space_by_kind(
absl::string_view memory_space_kind) const {
return Unimplemented("memory_space_by_kind not implemented");
}
virtual absl::StatusOr<std::intptr_t> GetStreamForExternalReadyEvents()
const {
return Unimplemented(
"PjRtDevice::GetStreamForExternalReadyEvents only implemented for "
"GPU");
}
virtual absl::StatusOr<bool> PoisonExecution(int32_t launch_id,
absl::Status error) {
return Unimplemented("PoisonExecution is not supported");
}
};
class PjRtBuffer;
struct PjRtCrossHostRecvDescriptors {
absl::InlinedVector<std::string, 1> serialized_descriptors;
};
using PjRtCrossHostSendCancelNotifier = std::function<void(
absl::string_view serialized_descriptor, absl::Status reason,
std::function<void(absl::Status)> on_canceled)>;
struct PjRtCrossHostRecvState {
std::vector<PjRtCrossHostRecvDescriptors> descriptors;
PjRtCrossHostSendCancelNotifier cancel_notifier;
};
using PjRtCrossHostRecvNotifier =
std::function<void(absl::StatusOr<PjRtCrossHostRecvState>)>;
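// A move-only chunk of host memory with a custom deleter; AllocateDefault
// allocates with malloc and frees with free.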
class PjRtChunk {
public:
static PjRtChunk AllocateDefault(size_t size) {
return PjRtChunk(malloc(size), size, [](void* ptr) { free(ptr); });
}
PjRtChunk() = default;
PjRtChunk(void* data, size_t size, std::function<void(void*)> deleter)
: data_(static_cast<uint8_t*>(data)),
size_(size),
deleter_(std::move(deleter)) {}
~PjRtChunk() {
if (data_) {
deleter_(data_);
}
}
PjRtChunk(PjRtChunk&& other)
: data_(other.data_),
size_(other.size_),
deleter_(std::move(other.deleter_)) {
other.data_ = nullptr;
}
PjRtChunk& operator=(PjRtChunk&& other) {
if (data_) {
deleter_(data_);
}
data_ = other.data_;
size_ = other.size_;
deleter_ = std::move(other.deleter_);
other.data_ = nullptr;
return *this;
}
PjRtChunk(const PjRtChunk&) = delete;
PjRtChunk& operator=(const PjRtChunk&) = delete;
uint8_t* data() { return data_; }
const uint8_t* data() const { return data_; }
int64_t size() const { return size_; }
std::function<void(void*)> deleter() const { return deleter_; }
void release() {
data_ = nullptr;
size_ = 0;
deleter_ = nullptr;
}
private:
uint8_t* data_ = nullptr;
size_t size_ = 0;
std::function<void(void*)> deleter_;
};
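// Streams chunks of host data to the device; subclasses implement AddChunk. The
// stream is complete once current_bytes_ reaches total_bytes_.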
class CopyToDeviceStream {
public:
CopyToDeviceStream(int64_t total_bytes, int64_t granule_bytes)
: total_bytes_(total_bytes), granule_bytes_(granule_bytes) {}
virtual ~CopyToDeviceStream();
virtual PjRtFuture<> AddChunk(PjRtChunk chunk) = 0;
int64_t total_bytes() const { return total_bytes_; }
int64_t granule_size_in_bytes() const { return granule_bytes_; }
int64_t current_bytes() const ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock lock(&mu_);
return current_bytes_;
}
bool IsComplete() const ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock lock(&mu_);
return IsCompleteLocked();
}
bool empty() const { return current_bytes() == 0; }
protected:
bool IsCompleteLocked() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return current_bytes_ == total_bytes_;
}
int64_t total_bytes_;
int64_t granule_bytes_;
int64_t current_bytes_ ABSL_GUARDED_BY(mu_) = 0;
mutable absl::Mutex mu_;
};
class PjRtHostMemoryForDeviceManager {
public:
virtual ~PjRtHostMemoryForDeviceManager();
virtual absl::StatusOr<PjRtChunk> ToDeviceLayout(
const void* src_data, size_t src_size, const Shape& host_shape,
const Shape& device_shape) = 0;
virtual absl::Status ToHostLayout(const void* src_data, size_t src_size,
const Shape& src_shape, void* dst_data,
size_t dst_size,
const Shape& dst_shape) = 0;
};
class PjRtLoadedExecutable;
struct PjRtPluginAttributes {
int64_t pjrt_c_api_major_version;
int64_t pjrt_c_api_minor_version;
absl::flat_hash_map<std::string, PjRtValueType> attributes;
};
class PjRtClient {
public:
PjRtClient() = default;
explicit PjRtClient(std::unique_ptr<PjRtHostMemoryForDeviceManager>
host_memory_for_device_manager)
: host_memory_for_device_manager_(
std::move(host_memory_for_device_manager)) {}
virtual ~PjRtClient() = default;
virtual int process_index() const = 0;
virtual int device_count() const = 0;
virtual int addressable_device_count() const = 0;
virtual absl::Span<PjRtDevice* const> devices() const = 0;
virtual absl::Span<PjRtDevice* const> addressable_devices() const = 0;
virtual absl::StatusOr<PjRtDevice*> LookupDevice(
PjRtGlobalDeviceId global_device_id) const = 0;
virtual absl::StatusOr<PjRtDevice*> LookupAddressableDevice(
PjRtLocalDeviceId local_device_id) const = 0;
virtual absl::Span<PjRtMemorySpace* const> memory_spaces() const = 0;
virtual PjRtPlatformId platform_id() const = 0;
virtual absl::string_view platform_name() const = 0;
virtual absl::string_view platform_version() const = 0;
virtual std::optional<PjRtPluginAttributes> plugin_attributes() const {
return std::nullopt;
}
virtual PjRtRuntimeType runtime_type() const = 0;
virtual absl::StatusOr<DeviceAssignment> GetDefaultDeviceAssignment(
int num_replicas, int num_partitions) const = 0;
virtual absl::StatusOr<DeviceAssignment> GetDefaultDeviceAssignment(
int num_replicas, std::optional<int> num_replicas_per_slice,
int num_partitions, const MultiSliceConfig* multi_slice_config) const {
return Unimplemented("Multi slice device assignment is not supported.");
}
virtual absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) = 0;
virtual absl::StatusOr<std::unique_ptr<HloCostAnalysis>> GetHloCostAnalysis()
const = 0;
virtual absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> Compile(
const XlaComputation& computation, CompileOptions options) = 0;
virtual absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> Compile(
mlir::ModuleOp module, CompileOptions options) = 0;
virtual absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
DeserializeExecutable(absl::string_view serialized,
std::optional<CompileOptions> options) = 0;
virtual absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
LoadSerializedExecutable(absl::string_view serialized,
std::optional<CompileOptions> options,
const LoadOptions& load_options) {
return Unimplemented("Loading serialized executable not supported.");
}
virtual absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> Load(
std::unique_ptr<PjRtExecutable> executable,
const LoadOptions& load_options) {
return Unimplemented("Loading executable not supported.");
}
virtual absl::StatusOr<std::unique_ptr<PjRtBuffer>> CreateUninitializedBuffer(
const Shape& shape, PjRtDevice* device) = 0;
virtual absl::StatusOr<std::unique_ptr<PjRtBuffer>> CreateErrorBuffer(
absl::Status error, const Shape& shape, PjRtMemorySpace* memory) {
return Unimplemented("CreateErrorBuffer not supported.");
}
ABSL_DEPRECATED(
"Use CreateErrorBuffer(absl::Status, Shape, PjRtMemorySpace*)")
virtual absl::StatusOr<std::unique_ptr<PjRtBuffer>> CreateErrorBuffer(
absl::Status error, const Shape& shape, PjRtDevice* device) {
auto default_memory_space = device->default_memory_space();
if (!default_memory_space.ok()) {
return default_memory_space.status();
}
return CreateErrorBuffer(std::move(error), shape, *default_memory_space);
}
virtual absl::StatusOr<const PjRtTopologyDescription*>
GetTopologyDescription() const {
return Unimplemented("GetTopologyDescription not supported on platform %s",
platform_name());
}
class AsyncHostToDeviceTransferManager {
public:
virtual ~AsyncHostToDeviceTransferManager() = default;
virtual size_t buffer_count() const = 0;
virtual PjRtDevice* device() const = 0;
virtual std::unique_ptr<PjRtBuffer> RetrieveBuffer(int buffer_index) = 0;
virtual absl::Status TransferLiteralToBuffer(
int buffer_index, const LiteralSlice& literal,
absl::AnyInvocable<void() &&> on_done) = 0;
virtual size_t buffer_size(int buffer_index) const = 0;
virtual absl::Status TransferRawDataToBuffer(
int buffer_index, absl::string_view data,
absl::AnyInvocable<void() &&> on_done) = 0;
virtual absl::Status TransferRawDataToSubBuffer(
int buffer_index, const void* data, int64_t offset,
int64_t transfer_size, bool is_last_transfer,
absl::AnyInvocable<void() &&> on_done) = 0;
virtual void SetBufferError(int buffer_index, absl::Status error) = 0;
using TransferMetadata = absl::flat_hash_map<std::string, std::string>;
v | #include "xla/pjrt/pjrt_client_test.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/span.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TestClientFactory {
public:
void Register(
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory) {
absl::MutexLock lock(&mu_);
CHECK(!factory_);
factory_ = std::move(factory);
}
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> Get() const {
absl::MutexLock lock(&mu_);
return factory_;
}
private:
mutable absl::Mutex mu_;
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory_
ABSL_GUARDED_BY(mu_);
};
TestClientFactory& GetGlobalTestClientFactory() {
static auto* const factory = new TestClientFactory;
return *factory;
}
absl::StatusOr<std::unique_ptr<PjRtClient>> GetClient() {
return GetGlobalTestClientFactory().Get()();
}
}
void RegisterTestClientFactory(
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory) {
GetGlobalTestClientFactory().Register(std::move(factory));
}
namespace {
std::unique_ptr<PjRtLoadedExecutable> MakeIncrementProgram(
PjRtClient* client, bool alias, int device, bool tuplize_arg = false) {
Shape shape = ShapeUtil::MakeShape(S32, {4});
XlaBuilder builder("inc");
if (tuplize_arg) {
shape = ShapeUtil::MakeTupleShape({shape});
}
auto inp = Parameter(&builder, 0, shape, "inp");
if (tuplize_arg) {
inp = GetTupleElement(inp, 0);
}
auto one = ConstantR0<int32_t>(&builder, 1);
auto inc = Add(inp, one);
if (alias) {
builder.SetUpAlias({}, 0, {});
}
XlaComputation computation = builder.Build(inc).value();
DeviceAssignment assignment(1, 1);
assignment(0, 0) = device;
CompileOptions options;
options.parameter_is_tupled_arguments = tuplize_arg;
options.executable_build_options.set_device_assignment(assignment);
return client->Compile(computation, options).value();
}
class PjRtClientTest
: public ::testing::TestWithParam<ExecuteOptions::ExecutionMode> {};
TEST_P(PjRtClientTest, Execute) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
MakeIncrementProgram(client.get(), false, 0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithImmutableUntilTransferCompletes) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
MakeIncrementProgram(client.get(), false, 0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithTupleZeroCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable = MakeIncrementProgram(client.get(), false,
0, true);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
[&data]() {
std::fill(data.begin(), data.end(), 1);
},
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
buffer.reset();
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithDonation) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
MakeIncrementProgram(client.get(), true, 0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithDonationAbort) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
if (client->platform_id() == CpuId()) {
return;
}
auto executable =
MakeIncrementProgram(client.get(), true, 0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
auto external_reference = buffer->AcquireExternalReference();
ExecuteOptions options;
options.execution_mode = GetParam();
auto resultsor = executable->Execute({{buffer.get()}}, options);
ASSERT_FALSE(resultsor.ok());
EXPECT_THAT(resultsor.status().message(),
::testing::HasSubstr(
"Donation requested for buffer with external reference"));
}
TEST_P(PjRtClientTest, ExecuteWithConcurrentUsage) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
MakeIncrementProgram(client.get(), false, 0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(
tsl::Env::Default(), "ExecuteWithConcurrentUsage", kNumThreads);
constexpr int kConcurrency = 16;
absl::BlockingCounter blocking_counter(kConcurrency);
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrency);
for (int i = 0; i < kConcurrency; ++i) {
thread_pool.Schedule([&, &result = results[i]]() {
auto results = executable->Execute({{buffer.get()}}, options).value();
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
result = std::move(results[0][0]);
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
std::vector<int32_t> expected(4, 1);
for (const auto& result : results) {
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
TEST_P(PjRtClientTest, ExecuteWithConcurrentUsageAndDonation) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
MakeIncrementProgram(client.get(), false, 0);
auto executable_with_donation =
MakeIncrementProgram(client.get(), true, 0);
std::vector<int32_t> data(4, 0);
std::vector<int32_t> expected(4, 1);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(),
"ExecuteWithConcurrentUsageAndDonation",
kNumThreads);
constexpr int kConcurrentUsage = 16;
absl::BlockingCounter blocking_counter(kConcurrentUsage + 1);
for (int i = 0; i < kConcurrentUsage; ++i) {
thread_pool.Schedule([&]() {
auto results_or = executable->Execute({{buffer.get()}}, options);
if (results_or.ok()) {
auto& results = *results_or;
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
auto literal = results[0][0]->ToLiteralSync().value();
CHECK(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
blocking_counter.DecrementCount();
});
}
std::unique_ptr<PjRtBuffer> result;
thread_pool.Schedule([&]() {
auto results =
executable_with_donation->Execute({{buffer.get()}}, options).value();
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
result = std::move(results[0][0]);
blocking_counter.DecrementCount();
});
blocking_counter.Wait();
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
INSTANTIATE_TEST_SUITE_P(
PjRtClientTestSuite, PjRtClientTest,
::testing::Values(ExecuteOptions::ExecutionMode::kSynchronous,
ExecuteOptions::ExecutionMode::kAsynchronous));
TEST(PjRtClientTest, CopyToDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto* device_1 = client->addressable_devices()[1];
TF_ASSERT_OK_AND_ASSIGN(auto result, buffer->CopyToDevice(device_1));
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST(PjRtClientTest, CopyToDeviceAsync) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto* device_1 = client->addressable_devices()[1];
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "CopyToDeviceAsync",
kNumThreads);
constexpr int kConcurrentCopy = 16;
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrentCopy);
for (int i = 0; i < kConcurrentCopy; ++i) {
TF_ASSERT_OK_AND_ASSIGN(results[i], buffer->CopyToDevice(device_1));
}
buffer.reset();
for (const auto& result : results) {
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
TEST(PjRtClientTest, CopyToDeviceAsyncExternalCpuOnly) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
if (client->platform_id() != CpuId()) return;
std::vector<int32_t> data(4, 0);
auto* data_ptr = data.data();
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->CreateViewOfDeviceBuffer(
data_ptr, shape, client->addressable_devices()[0],
[data = std::move(data)]() mutable {
data.clear();
data.shrink_to_fit();
}));
auto* device_1 = client->addressable_devices()[1];
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(),
"CopyToDeviceAsyncExternal", kNumThreads);
constexpr int kConcurrentCopy = 16;
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrentCopy);
for (int i = 0; i < kConcurrentCopy; ++i) {
TF_ASSERT_OK_AND_ASSIGN(results[i], buffer->CopyToDevice(device_1));
}
buffer.reset();
for (const auto& result : results) {
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> MakeFloatBuffer(
PjRtClient* client, const std::vector<float>& data,
absl::Span<const int64_t> dimensions) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
return client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]);
}
TEST(PjRtClientTest, DuplicateDonationError) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
constexpr char kProgram[] =
R"(HloModule DuplicateDonationError, input_output_alias={ {0}: (1, {}, must-alias), {1}: (2, {}, must-alias) }
ENTRY DuplicateDonationError() -> (f32[2, 2], f32[2, 2]) {
%input0 = f32[2, 2] parameter(0)
%input1 = f32[2, 2] parameter(1)
%input2 = f32[2, 2] parameter(2)
%input3 = f32[2, 2] parameter(3)
%tmp1 = f32[2, 2] add(%input0, %input1)
%tmp2 = f32[2, 2] add(%input2, %input3)
ROOT %result = (f32[2, 2], f32[2, 2]) tuple(%tmp1, %tmp2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, {}));
std::vector<float> data(4, 0);
TF_ASSERT_OK_AND_ASSIGN(auto buffer0,
MakeFloatBuffer(client.get(), data, {2, 2}));
TF_ASSERT_OK_AND_ASSIGN(auto buffer1,
MakeFloatBuffer(client.get(), data, {2, 2}));
TF_ASSERT_OK_AND_ASSIGN(auto buffer2,
MakeFloatBuffer(client.get(), data, {2, 2}));
{
auto result = pjrt_executable->Execute({{
buffer0.get(),
buffer1.get(),
buffer1.get(),
buffer0.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(donate(a), donate(a))"));
}
{
auto result = pjrt_executable->Execute({{
buffer1.get(),
buffer1.get(),
buffer2.get(),
buffer0.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(a, donate(a))"));
}
{
auto result = pjrt_executable->Execute({{
buffer0.get(),
buffer1.get(),
buffer2.get(),
buffer2.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(donate(a), a)"));
}
}
TEST(PjRtClientTest, GetDefaultLayout) {}
}
} |
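A minimal standalone sketch of the register-once client-factory pattern exercised by the test file above; FakeClient, ClientFactoryRegistry, and GlobalRegistry are placeholder names invented for this illustration and are not part of the dataset row.
#include <functional>
#include <memory>
#include <mutex>
#include <utility>

struct FakeClient {};  // Stand-in for the real PjRtClient.

class ClientFactoryRegistry {
 public:
  using Factory = std::function<std::unique_ptr<FakeClient>()>;

  // Stores the factory on first call; the test helper above instead
  // CHECK-fails on a second registration.
  void Register(Factory factory) {
    std::lock_guard<std::mutex> lock(mu_);
    if (!factory_) factory_ = std::move(factory);
  }

  // Returns a copy of the registered factory (empty if none registered yet).
  Factory Get() const {
    std::lock_guard<std::mutex> lock(mu_);
    return factory_;
  }

 private:
  mutable std::mutex mu_;
  Factory factory_;
};

// A function-local static yields one process-wide registry, matching the
// GetGlobalTestClientFactory() idiom above.
ClientFactoryRegistry& GlobalRegistry() {
  static auto* const registry = new ClientFactoryRegistry;
  return *registry;
}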
1,803 | cpp | tensorflow/tensorflow | array_util | third_party/xla/xla/python/ifrt_proxy/common/array_util.cc | third_party/xla/xla/python/ifrt_proxy/common/array_util_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_COMMON_ARRAY_UTIL_H_
#define XLA_PYTHON_IFRT_PROXY_COMMON_ARRAY_UTIL_H_
#include <optional>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
namespace xla {
namespace ifrt {
namespace proxy {
absl::StatusOr<std::vector<int64_t>> DefaultByteStrides(DType dtype,
const Shape& shape);
class ArrayMemRegion {
public:
using ByteStrides = std::optional<absl::Span<const int64_t>>;
static absl::StatusOr<ArrayMemRegion> FromMinimalMemRegion(
absl::string_view mem_region, DType dtype, const Shape& shape,
ByteStrides byte_strides);
static absl::StatusOr<ArrayMemRegion> FromZerothElementPointer(
const void* zeroth_element, DType dtype, const Shape& shape,
ByteStrides byte_strides);
absl::string_view mem_region() const;
void* zeroth_element() const;
private:
ArrayMemRegion(void* mem_region_start, size_t nbytes)
: mem_region_start_(mem_region_start), nbytes_(nbytes) {}
void* const mem_region_start_;
const size_t nbytes_;
};
}
}
}
#endif
#include "xla/python/ifrt_proxy/common/array_util.h"
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
std::string StridesAsStr(const ArrayMemRegion::ByteStrides& strides) {
if (!strides.has_value()) return "strides{nullopt}";
return absl::StrCat("strides{", absl::StrJoin(*strides, ","), "}");
}
}
absl::StatusOr<std::vector<int64_t>> DefaultByteStrides(const DType dtype,
const Shape& shape) {
if (!dtype.byte_size().has_value()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported data type to query byte-strides for: ",
dtype.DebugString()));
}
std::vector<int64_t> result(shape.dims().size());
int64_t stride = *dtype.byte_size();
for (int i = static_cast<int>(shape.dims().size()) - 1; i >= 0; --i) {
result[i] = stride;
stride *= shape.dims()[i];
}
return result;
}
absl::StatusOr<ArrayMemRegion> ArrayMemRegion::FromZerothElementPointer(
const void* zeroth_element, const DType dtype, const Shape& shape,
ByteStrides byte_strides) {
if (!dtype.byte_size().has_value()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported data type to construct ArrayMemRegion: ",
dtype.DebugString()));
}
void* const mem_region_start = const_cast<void*>(zeroth_element);
if (!byte_strides.has_value() ||
(byte_strides->empty() && shape.dims().empty())) {
return ArrayMemRegion(mem_region_start,
dtype.byte_size().value() * shape.num_elements());
}
if (shape.num_elements() == 0) {
return ArrayMemRegion(mem_region_start, 0);
}
if (shape.dims().size() != byte_strides->size()) {
return absl::InvalidArgumentError(
absl::StrCat("Shape has different dimensions from byte_strides: ",
shape.DebugString(), " vs ", StridesAsStr(byte_strides)));
}
uint64_t last_element_byte_offset = 0;
for (int i = 0; i < byte_strides->size(); ++i) {
int stride = (*byte_strides)[i];
if (shape.dims()[i] < 0) {
return absl::InvalidArgumentError(
absl::StrCat("A shape dimension is negative: ", shape.DebugString()));
} else if (shape.dims()[i] == 1) {
continue;
} else if (stride <= 0) {
return absl::UnimplementedError(
absl::StrCat("Negative or zero strides are not fully supported: ",
StridesAsStr(byte_strides)));
} else if (stride % dtype.byte_size().value() != 0) {
return absl::UnimplementedError(absl::StrCat(
"byte_stride[", i, "] is not a multiple of the data-type's size: ",
StridesAsStr(byte_strides), ", dtype=", dtype.DebugString()));
} else {
DCHECK_GT(shape.dims()[i], 0);
last_element_byte_offset += (stride * (shape.dims()[i] - 1));
}
}
return ArrayMemRegion(mem_region_start,
last_element_byte_offset + dtype.byte_size().value());
}
absl::StatusOr<ArrayMemRegion> ArrayMemRegion::FromMinimalMemRegion(
absl::string_view mem_region, const DType dtype, const Shape& shape,
ByteStrides byte_strides) {
TF_ASSIGN_OR_RETURN(
auto result,
FromZerothElementPointer(mem_region.data(), dtype, shape, byte_strides));
if (result.mem_region().size() != mem_region.size()) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect size ", result.mem_region().size(), " vs ",
mem_region.size(), "; is provided memory region minimal? ",
dtype.DebugString(), " ", shape.DebugString(), " ",
StridesAsStr(byte_strides)));
}
CHECK_EQ(result.mem_region().data(), mem_region.data());
return result;
}
absl::string_view ArrayMemRegion::mem_region() const {
return absl::string_view(static_cast<char*>(mem_region_start_), nbytes_);
}
void* ArrayMemRegion::zeroth_element() const {
return mem_region_start_;
}
}
}
} | #include "xla/python/ifrt_proxy/common/array_util.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::ElementsAre;
using ::testing::Not;
using ::testing::TestWithParam;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
constexpr DType::Kind kF64 = DType::Kind::kF64;
constexpr DType::Kind kS32 = DType::Kind::kS32;
constexpr DType::Kind kString = DType::Kind::kString;
using Strides = std::vector<int64_t>;
TEST(DefaultByteStrides, ErrorsIfBadDtype) {
EXPECT_THAT(DefaultByteStrides(DType(kString), Shape({1})), Not(IsOk()));
}
TEST(DefaultByteStrides, HappyCase) {
EXPECT_THAT(DefaultByteStrides(DType(kF64), Shape({4, 3, 5})),
IsOkAndHolds(ElementsAre(120, 40, 8)));
}
struct TC {
const std::string test_name;
const DType::Kind dtype_kind;
const std::vector<int64_t> shape;
const std::optional<std::vector<int64_t>> byte_strides;
const std::optional<size_t> expected_size;
};
std::string PrintToString(const TC& tc) { return tc.test_name; }
class ArrayMemRegionSuccess : public TestWithParam<TC> {};
INSTANTIATE_TEST_SUITE_P(
Tests, ArrayMemRegionSuccess,
testing::Values(
TC{"DefaultF64", kF64, {4, 3, 5}, std::nullopt},
TC{"MajorToMinorStridesF64", kF64, {4, 3, 5}, Strides({120, 40, 8})},
TC{"NotMajorToMinorF64", kF64, {3, 4, 5}, Strides({40, 120, 8})},
TC{"TransposedF64", kF64, {5, 3, 4}, Strides({8, 40, 120})},
TC{"DefaultS32", kS32, {4, 3, 5}, std::nullopt},
TC{"MajorToMinorStridesS32", kS32, {4, 3, 5}, Strides({60, 20, 4})},
TC{"NotMajorToMinorS32", kS32, {3, 4, 5}, Strides({20, 60, 4})},
TC{"TransposedS32", kS32, {5, 3, 4}, Strides({4, 20, 60})},
TC{"ScalarF64DefaultStrides", kF64, {}, std::nullopt},
TC{"ScalarF64EmptyStrides", kF64, {}, Strides({})},
TC{"NoColsDefaultStrides", kF64, {5, 0}, std::nullopt},
TC{"NoColsStridesNonZero", kF64, {5, 0}, Strides({40, 4})},
TC{"NoColsStridesZero", kF64, {5, 0}, Strides({0, 0})},
TC{"NoRowsDefaultStrides", kF64, {0, 5}, std::nullopt},
TC{"NoRowsStridesNonZero", kF64, {0, 5}, Strides({40, 4})},
TC{"NoRowsStridesZero", kF64, {0, 5}, Strides({0, 0})},
TC{"SingleElementArbitraryStrides", kF64, {1, 1}, Strides({100, 100})},
TC{"OneRowArbitraryColStride", kF64, {1, 5}, Strides({100, 8})},
TC{"OneColArbitraryRowStride", kF64, {5, 1}, Strides({8, 100})},
TC{"OneRowZeroColStride", kF64, {1, 5}, Strides({0, 8})},
TC{"OneColZeroRowStride", kF64, {5, 1}, Strides({8, 0})},
TC{"NonCompactSingleDimension", kS32, {5}, Strides({16}), 68},
TC{"NonCompactDim0", kS32, {4, 3, 5}, Strides({120, 20, 4}), 420},
TC{"PaddedElements", kS32, {4, 3, 5}, Strides({120, 40, 8}), 476}),
testing::PrintToStringParamName());
TEST_P(ArrayMemRegionSuccess, TestCase) {
const TC tc = GetParam();
const DType dtype(tc.dtype_kind);
const Shape shape(tc.shape);
const size_t expected_size = tc.expected_size.value_or(
dtype.byte_size().value() * shape.num_elements());
std::string data(expected_size, 'a');
TF_ASSERT_OK_AND_ASSIGN(auto mem_region1,
ArrayMemRegion::FromZerothElementPointer(
data.data(), dtype, shape, tc.byte_strides));
EXPECT_EQ(mem_region1.zeroth_element(), data.data());
EXPECT_EQ(mem_region1.mem_region().data(), data.data());
EXPECT_EQ(mem_region1.mem_region().size(), data.size());
TF_ASSERT_OK_AND_ASSIGN(
auto mem_region2, ArrayMemRegion::FromMinimalMemRegion(data, dtype, shape,
tc.byte_strides));
EXPECT_EQ(mem_region2.zeroth_element(), data.data());
EXPECT_EQ(mem_region2.mem_region().data(), data.data());
EXPECT_EQ(mem_region2.mem_region().size(), data.size());
}
class ArrayMemRegionFailure : public TestWithParam<TC> {};
INSTANTIATE_TEST_SUITE_P(
Tests, ArrayMemRegionFailure,
testing::Values(
TC{"OneString", kString, {}, std::nullopt},
TC{"ManyStrings", kString, {5}, std::nullopt},
TC{"NegativeByteStrides", kS32, {4, 3, 5}, Strides({-60, -20, -4})},
TC{"ZeroByteStride", kS32, {5, 5}, Strides({0, 0})},
TC{"SmallerByteStrideThanDataType", kS32, {5, 5}, Strides({1, 1})},
TC{"ByteStrideIndivisibleByDataType", kS32, {5, 5}, Strides({7, 7})},
TC{"NegativeShapeDimension", kS32, {-5, -5}, Strides({20, 4})}),
testing::PrintToStringParamName());
TEST_P(ArrayMemRegionFailure, TestCase) {
const TC tc = GetParam();
const DType dtype(tc.dtype_kind);
const Shape shape(tc.shape);
char const* kSomeAddr = reinterpret_cast<char*>(1UL << 48);
auto mem_region1 = ArrayMemRegion::FromZerothElementPointer(
kSomeAddr, dtype, shape, tc.byte_strides);
EXPECT_THAT(mem_region1.status(), Not(IsOk()));
const size_t kSomeSize = 1024;
auto mem_region2 = ArrayMemRegion::FromMinimalMemRegion(
absl::string_view(kSomeAddr, kSomeSize), dtype, shape, tc.byte_strides);
EXPECT_THAT(mem_region2.status(), Not(IsOk()));
}
TEST(ArrayMemRegion, FromBadMemRegionSizeFails) {
const DType kDType(kS32);
const Shape kShape({5, 5});
const size_t kDataBytes = kDType.byte_size().value() * kShape.num_elements();
const size_t kExtraSuffixBytes = 10;
std::string data_with_extra_suffix(kDataBytes + kExtraSuffixBytes, 'a');
TF_ASSERT_OK_AND_ASSIGN(
auto mem_region1,
ArrayMemRegion::FromZerothElementPointer(
data_with_extra_suffix.data(), kDType, kShape,
std::nullopt));
EXPECT_EQ(mem_region1.mem_region().data(), data_with_extra_suffix.data());
EXPECT_EQ(mem_region1.zeroth_element(), data_with_extra_suffix.data());
EXPECT_LT(mem_region1.mem_region().size(), data_with_extra_suffix.size());
EXPECT_EQ(mem_region1.mem_region().size(), kDataBytes);
auto mem_region2 = ArrayMemRegion::FromMinimalMemRegion(
data_with_extra_suffix, kDType, kShape,
std::nullopt);
EXPECT_THAT(mem_region2.status(), Not(IsOk()));
std::string data_without_some_bytes(kDataBytes - kExtraSuffixBytes, 'a');
auto mem_region3 = ArrayMemRegion::FromMinimalMemRegion(
data_without_some_bytes, kDType, kShape,
std::nullopt);
EXPECT_THAT(mem_region3.status(), Not(IsOk()));
}
}
}
}
} |
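The array_util row above derives dense major-to-minor byte strides and the size of the smallest memory region covering a strided array. The sketch below repeats that arithmetic against plain standard-library types for illustration only; DenseByteStrides, MinimalRegionBytes, and the element_size parameter (standing in for dtype.byte_size()) are assumptions of this sketch, not names from the row.
#include <cstddef>
#include <cstdint>
#include <vector>

// Dense major-to-minor (row-major) strides in bytes: the innermost dimension
// advances by one element, mirroring the loop in DefaultByteStrides above.
std::vector<int64_t> DenseByteStrides(const std::vector<int64_t>& dims,
                                      int64_t element_size) {
  std::vector<int64_t> strides(dims.size());
  int64_t stride = element_size;
  for (int i = static_cast<int>(dims.size()) - 1; i >= 0; --i) {
    strides[i] = stride;
    stride *= dims[i];
  }
  return strides;
}

// Bytes in the smallest contiguous region holding every element of a
// (possibly padded) strided array: the byte offset of the last element plus
// one element, as in ArrayMemRegion::FromZerothElementPointer above.
int64_t MinimalRegionBytes(const std::vector<int64_t>& dims,
                           const std::vector<int64_t>& strides,
                           int64_t element_size) {
  int64_t last_offset = 0;
  for (size_t i = 0; i < dims.size(); ++i) {
    if (dims[i] == 0) return 0;  // An empty array occupies no bytes.
    if (dims[i] > 1) last_offset += strides[i] * (dims[i] - 1);
  }
  return last_offset + element_size;
}

// Example: an f64 array of shape {4, 3, 5} gets strides {120, 40, 8}, and the
// minimal region is 120 * 3 + 40 * 2 + 8 * 4 + 8 = 480 bytes, the same as the
// compact size 4 * 3 * 5 * 8.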
1,804 | cpp | tensorflow/tensorflow | grpc_client_session | third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session.cc | third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_CLIENT_GRPC_CLIENT_SESSION_H_
#define XLA_PYTHON_IFRT_PROXY_CLIENT_GRPC_CLIENT_SESSION_H_
#include <functional>
#include <memory>
#include "absl/base/call_once.h"
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "grpcpp/client_context.h"
#include "grpcpp/support/client_callback.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/client/client_session.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/unbounded_work_queue.h"
namespace xla {
namespace ifrt {
namespace proxy {
class GrpcClientSession : public ClientSession {
public:
using StreamTerminatedCallback = std::function<void(absl::Status)>;
static std::shared_ptr<GrpcClientSession> Create(
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
GrpcIfrtSessionMetadata metadata,
StreamTerminatedCallback stream_terminated_cb);
Future<std::shared_ptr<IfrtResponse>> Enqueue(
std::unique_ptr<IfrtRequest> request) override;
using ResponseCallback =
std::function<void(absl::StatusOr<std::shared_ptr<IfrtResponse>>)>;
absl::Status Enqueue(std::unique_ptr<IfrtRequest> req,
ResponseCallback callback);
void Finish(const absl::Status& client_status) override;
GrpcClientSession(const GrpcClientSession&) = delete;
GrpcClientSession& operator=(const GrpcClientSession&) = delete;
~GrpcClientSession() override;
private:
class ResponseCallbackTable;
GrpcClientSession(std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
std::unique_ptr<::grpc::ClientContext> context,
StreamTerminatedCallback stream_terminated_cb);
void ReadLoop();
const std::unique_ptr<ResponseCallbackTable> response_callbacks_;
std::unique_ptr<tsl::thread::ThreadPool> reader_thread_;
absl::Notification reader_thread_stopped_;
bool writes_stopped_ ABSL_GUARDED_BY(writer_mu_) = false;
absl::Mutex writer_mu_;
absl::once_flag finish_once_;
const std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub_;
const std::unique_ptr<::grpc::ClientContext> context_;
const std::unique_ptr<
::grpc::ClientReaderWriterInterface<IfrtRequest, IfrtResponse>>
stream_;
const StreamTerminatedCallback stream_terminated_cb_;
std::unique_ptr<tsl::UnboundedWorkQueue> user_futures_work_queue_;
};
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> CreateGrpcStub(
absl::string_view server_address);
}
}
}
#endif
#include "xla/python/ifrt_proxy/client/grpc_client_session.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "grpc/grpc.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "xla/pjrt/distributed/util.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/client/client_session.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/unbounded_work_queue.h"
namespace xla {
namespace ifrt {
namespace proxy {
using OpId = int64_t;
class GrpcClientSession::ResponseCallbackTable {
public:
absl::Status Add(OpId op_id, ResponseCallback callback) {
absl::MutexLock l(&mu_);
const bool inserted = table_.insert({op_id, std::move(callback)}).second;
if (!inserted) {
return absl::AlreadyExistsError(
absl::StrCat("Op id ", op_id, " already exists"));
}
return absl::OkStatus();
}
std::optional<ResponseCallback> Pop(OpId op_id) {
absl::MutexLock l(&mu_);
auto it = table_.find(op_id);
if (it == table_.end()) {
return std::nullopt;
}
auto cb = std::move(it->second);
table_.erase(it);
return std::move(cb);
}
absl::flat_hash_map<OpId, ResponseCallback> PopAll() {
absl::flat_hash_map<OpId, ResponseCallback> result;
absl::MutexLock l(&mu_);
result = std::move(table_);
table_ = absl::flat_hash_map<OpId, ResponseCallback>();
return result;
}
private:
absl::Mutex mu_;
absl::flat_hash_map<OpId, ResponseCallback> table_ ABSL_GUARDED_BY(mu_);
};
std::shared_ptr<GrpcClientSession> GrpcClientSession::Create(
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
GrpcIfrtSessionMetadata metadata,
StreamTerminatedCallback stream_terminated_cb) {
auto context = std::make_unique<::grpc::ClientContext>();
context->AddMetadata("ifrt-proxy-grpc-ifrt-session-metadata-bin",
metadata.SerializeAsString());
std::shared_ptr<GrpcClientSession> result(new GrpcClientSession(
std::move(stub), std::move(context), std::move(stream_terminated_cb)));
return result;
}
GrpcClientSession::GrpcClientSession(
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
std::unique_ptr<::grpc::ClientContext> context,
StreamTerminatedCallback stream_terminated_cb)
: response_callbacks_(std::make_unique<ResponseCallbackTable>()),
reader_thread_(std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), "ifrt_proxy_client_grpc_reader",
1)),
stub_(std::move(stub)),
context_(std::move(context)),
stream_(stub_->IfrtSession(context_.get())),
stream_terminated_cb_(std::move(stream_terminated_cb)),
user_futures_work_queue_(std::make_unique<tsl::UnboundedWorkQueue>(
tsl::Env::Default(), "GrpcClientSessionUserFuturesWorkQueue")) {
reader_thread_->Schedule(
absl::bind_front(&GrpcClientSession::ReadLoop, this));
}
Future<std::shared_ptr<IfrtResponse>> GrpcClientSession::Enqueue(
std::unique_ptr<IfrtRequest> request) {
auto promise = Future<std::shared_ptr<IfrtResponse>>::CreatePromise();
absl::Status status = Enqueue(
std::move(request),
[promise, queue = user_futures_work_queue_.get()](
absl::StatusOr<std::shared_ptr<IfrtResponse>> response) mutable {
queue->Schedule([promise = std::move(promise),
response = std::move(response)]() mutable -> void {
promise.Set(std::move(response));
});
});
if (!status.ok()) {
user_futures_work_queue_->Schedule([promise, status]() mutable -> void {
promise.Set(std::move(status));
});
}
return Future<std::shared_ptr<IfrtResponse>>(std::move(promise));
}
absl::Status GrpcClientSession::Enqueue(std::unique_ptr<IfrtRequest> req,
ResponseCallback callback) {
const OpId op_id = req->request_metadata().op_id();
absl::MutexLock l(&writer_mu_);
if (writes_stopped_) {
return absl::FailedPreconditionError(
"GrpcClientSession: writes no longer allowed.");
}
TF_RETURN_IF_ERROR(response_callbacks_->Add(op_id, std::move(callback)));
if (!stream_->Write(*req)) {
CHECK(response_callbacks_->Pop(op_id).has_value());
return absl::UnknownError("GrpcClientSession: writing to stream failed.");
}
return absl::OkStatus();
}
void GrpcClientSession::ReadLoop() {
while (true) {
auto read_buffer = std::make_unique<IfrtResponse>();
if (!stream_->Read(read_buffer.get())) {
LOG(INFO) << "GrpcClientSession: reader loop is exiting.";
break;
}
const OpId op_id = read_buffer->response_metadata().op_id();
std::optional<ResponseCallback> callback = response_callbacks_->Pop(op_id);
if (callback.has_value()) {
VLOG(1) << "GrpcClientSession: Issuing callback for " << op_id;
(*callback)(std::move(read_buffer));
VLOG(1) << "GrpcClientSession: Done with callback for " << op_id;
} else {
LOG(ERROR) << "Received response with no remaining registered callback: "
<< read_buffer->DebugString();
}
}
reader_thread_stopped_.Notify();
Finish(absl::OkStatus());
}
void GrpcClientSession::Finish(const absl::Status& client_status) {
LOG(INFO) << "GrpcClientSession: Finish() called with client status "
<< client_status;
absl::call_once(finish_once_, [&] {
context_->TryCancel();
LOG(INFO) << "GrpcClientSession: Waiting for reader thread to stop.";
reader_thread_stopped_.WaitForNotification();
auto finish_stream_and_get_server_status = [&]() -> absl::Status {
LOG(INFO) << "GrpClientSession: Attempting to call stream->Finish()";
absl::MutexLock l(&writer_mu_);
LOG(INFO) << "GrpClientSession: Attempting to call stream->Finish(), "
"mutex acquired";
absl::Status server_status = xla::FromGrpcStatus(stream_->Finish());
LOG(INFO) << "GrpClientSession: stream->Finish() returned server status "
<< server_status;
CHECK(!writes_stopped_);
writes_stopped_ = true;
return server_status;
};
absl::Status combined_status = finish_stream_and_get_server_status();
combined_status.Update(client_status);
auto all_callbacks = response_callbacks_->PopAll();
for (auto& [_, cb] : all_callbacks) {
if (combined_status.ok()) {
cb(absl::AbortedError("Finish(OK) called."));
} else {
cb(combined_status);
}
}
LOG(INFO) << "GrpClientSession::Finish(): calling terminated cb with "
<< combined_status;
stream_terminated_cb_(combined_status);
});
}
GrpcClientSession::~GrpcClientSession() {
GrpcClientSession::Finish(absl::CancelledError("~GrpcClientSession called."));
reader_thread_.reset();
LOG(INFO) << "Deleting GrpcClientSession.user_futures_work_queue_ ...";
user_futures_work_queue_.reset();
LOG(INFO) << "Deleted GrpcClientSession.user_futures_work_queue_.";
}
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> CreateGrpcStub(
absl::string_view server_address) {
::grpc::ChannelArguments args;
args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, -1);
args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, -1);
std::shared_ptr<::grpc::Channel> channel = ::grpc::CreateCustomChannel(
std::string(server_address), GetClientCredentials(), args);
VLOG(0) << " Established channel.";
CHECK(channel != nullptr);
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub =
grpc::GrpcIfrtService::NewStub(channel);
VLOG(0) << " Created stub.";
CHECK(stub != nullptr);
return stub;
}
}
}
} | #include "xla/python/ifrt_proxy/client/grpc_client_session.h"
#include <atomic>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/log/log_sink_registry.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "grpc/support/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/python/ifrt_proxy/client/version.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Not;
using ::tsl::testing::IsOk;
constexpr int kOp1 = 1;
constexpr int kOp2 = 2;
constexpr absl::Duration kSufficientTime = absl::Seconds(5);
GrpcIfrtSessionMetadata Metadata() {
GrpcIfrtSessionMetadata metadata;
metadata.mutable_version()->set_protocol_version(kClientMaxVersion);
return metadata;
}
absl::Status TestError() { return absl::UnknownError("test error"); }
class Queue {
public:
void Push(absl::Status t) {
absl::MutexLock l(&mu_);
queue_.push_back(std::move(t));
}
std::optional<absl::Status> PopOrTimeout(
absl::Duration timeout = kSufficientTime) {
absl::MutexLock l(&mu_);
auto cond = [this]() ABSL_SHARED_LOCKS_REQUIRED(mu_) -> bool {
return !queue_.empty();
};
mu_.AwaitWithTimeout(absl::Condition(&cond), timeout);
if (queue_.empty()) {
return std::nullopt;
}
absl::Status result = std::move(queue_.front());
queue_.pop_front();
return result;
}
absl::Status Pop(absl::Duration timeout = kSufficientTime) {
auto result = PopOrTimeout(timeout);
CHECK(result.has_value()) << "Timeout!";
return *result;
}
void PopAllDuringDestruction() {
absl::MutexLock l(&mu_);
allow_non_empty_destruction_ = true;
}
~Queue() {
absl::MutexLock l(&mu_);
if (!allow_non_empty_destruction_) CHECK(queue_.empty()) << " " << this;
}
private:
absl::Mutex mu_;
std::deque<absl::Status> queue_ ABSL_GUARDED_BY(mu_);
bool allow_non_empty_destruction_ ABSL_GUARDED_BY(mu_) = false;
};
void ExpectHeadAndTail(
std::vector<std::variant<absl::StatusOr<Queue*>, absl::Status>> var_list) {
std::vector<absl::Status> status_list;
for (const auto& v : var_list) {
if (std::holds_alternative<absl::StatusOr<Queue*>>(v)) {
status_list.push_back(std::get<absl::StatusOr<Queue*>>(v).status());
} else {
status_list.push_back(std::get<absl::Status>(v));
}
}
bool seen_not_ok = false;
std::string str;
for (const auto& s : status_list) {
absl::StrAppend(&str, "\n", s.ToString(), "\n-----\n");
}
for (const auto& s : status_list) {
if (!s.ok()) seen_not_ok = true;
if (seen_not_ok) {
EXPECT_THAT(s, Not(IsOk())) << str;
}
}
}
using ServerStream = ::grpc::ServerReaderWriter<IfrtResponse, IfrtRequest>;
using SessionAction = bool;
constexpr SessionAction kContinueSession = true;
constexpr SessionAction kStopSession = false;
using OnSessionStart = std::function<SessionAction()>;
using OnReqReceived =
std::function<SessionAction(const IfrtRequest&, ServerStream*)>;
class SimpleIfrtService : public grpc::GrpcIfrtService::Service {
public:
SimpleIfrtService(OnReqReceived on_req_received,
OnSessionStart on_session_start)
: on_req_received_(std::move(on_req_received)),
on_session_start_(std::move(on_session_start)) {}
::grpc::Status IfrtSession(::grpc::ServerContext* context,
ServerStream* stream) override {
if (on_session_start_ && on_session_start_() == kStopSession) {
return ::grpc::Status::OK;
}
{
absl::MutexLock l(&mu_);
CHECK(contexts_.insert(context).second);
}
while (true) {
IfrtRequest request;
LOG(INFO) << "Server: waiting on Read().";
if (!stream->Read(&request)) {
LOG(INFO) << "Server: Read() returned false.";
break;
}
LOG(INFO) << "Server: Read() returned true.";
if (!on_req_received_) {
IfrtResponse response;
response.mutable_response_metadata()->set_op_id(
request.request_metadata().op_id());
stream->Write(response);
} else if (on_req_received_(request, stream) == kStopSession) {
break;
}
}
{
absl::MutexLock l(&mu_);
CHECK_EQ(contexts_.erase(context), 1);
}
LOG(INFO) << "Finishing IFRT session";
return ::grpc::Status::OK;
}
void CancelAllServerSessions() {
absl::MutexLock l(&mu_);
for (const auto& context : contexts_) {
context->TryCancel();
}
}
private:
const OnReqReceived on_req_received_;
const OnSessionStart on_session_start_;
absl::Mutex mu_;
absl::flat_hash_set<::grpc::ServerContext*> contexts_ ABSL_GUARDED_BY(mu_);
};
class ClientAndServer {
public:
explicit ClientAndServer(OnReqReceived on_req_received = nullptr,
OnSessionStart on_session_start = nullptr) {
std::string address =
absl::StrCat("localhost:", tsl::testing::PickUnusedPortOrDie());
::grpc::ServerBuilder builder;
builder.AddListeningPort(address, GetServerCredentials());
ifrt_service_ =
std::make_unique<SimpleIfrtService>(on_req_received, on_session_start);
builder.RegisterService(ifrt_service_.get());
server_ = builder.BuildAndStart();
LOG(INFO) << "Server started and listening on " << address;
absl::FlushLogSinks();
std::shared_ptr<::grpc::Channel> channel =
::grpc::CreateChannel(address, GetClientCredentials());
channel->WaitForConnected(gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)));
LOG(INFO) << "conn_state = " << channel->GetState(false);
auto stub = grpc::GrpcIfrtService::NewStub(channel);
CHECK(stub != nullptr);
client_session_ = GrpcClientSession::Create(
std::move(stub), Metadata(), [this](absl::Status s) {
client_finished_q_.Push(s);
client_finished_notification_.Notify();
});
client_finished_q_.PopAllDuringDestruction();
}
void StopServer() {
ifrt_service_->CancelAllServerSessions();
server_->Shutdown();
server_->Wait();
}
~ClientAndServer() {
StopServer();
client_session_->Finish(absl::CancelledError("~ClientAndServer"));
client_finished_notification_.WaitForNotificationWithTimeout(
kSufficientTime);
CHECK(client_finished_notification_.HasBeenNotified());
}
GrpcClientSession* client_session() { return client_session_.get(); }
Queue* client_finished_q() { return &client_finished_q_; }
absl::StatusOr<Queue*> SendSimpleRequest(int op_id) {
owned_queues_.push_back(std::make_unique<Queue>());
Queue* q = owned_queues_.back().get();
auto req = std::make_unique<IfrtRequest>();
req->mutable_request_metadata()->set_op_id(op_id);
TF_RETURN_IF_ERROR(client_session_->Enqueue(
std::move(req), [q](absl::StatusOr<GrpcClientSession::Response> resp) {
q->Push(resp.status());
}));
return q;
}
private:
std::vector<std::unique_ptr<Queue>> owned_queues_;
Queue client_finished_q_;
absl::Notification client_finished_notification_;
std::shared_ptr<GrpcClientSession> client_session_;
std::unique_ptr<::grpc::Server> server_;
std::unique_ptr<SimpleIfrtService> ifrt_service_;
};
TEST(GrpcClientSessionTest, HappyCaseOneRequestWithServerTermination) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q, cs.SendSimpleRequest(kOp1));
EXPECT_THAT(response_q->Pop(), IsOk());
EXPECT_EQ(cs.client_finished_q()->PopOrTimeout(), std::nullopt);
cs.StopServer();
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, HappyCaseTwoRequestsWithClientFinish) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest(kOp1));
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_2, cs.SendSimpleRequest(kOp2));
EXPECT_THAT(response_q_1->Pop(), IsOk());
EXPECT_THAT(response_q_2->Pop(), IsOk());
EXPECT_EQ(cs.client_finished_q()->PopOrTimeout(), std::nullopt);
cs.client_session()->Finish(TestError());
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ServerFinishesDuringFirstRead) {
ClientAndServer cs(
[](auto, auto) { return kStopSession; });
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest(kOp1));
EXPECT_THAT(response_q_1->Pop(), Not(IsOk()));
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest(kOp2);
EXPECT_THAT(response_q_2.status(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ServerFinishesDuringConstruction) {
ClientAndServer cs(nullptr,
[]() { return kStopSession; });
absl::StatusOr<Queue*> response_q_1 = cs.SendSimpleRequest(kOp1);
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest(kOp2);
ExpectHeadAndTail({response_q_1, response_q_2});
if (response_q_1.ok()) EXPECT_THAT(response_q_1.value()->Pop(), Not(IsOk()));
if (response_q_2.ok()) EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesAfterServerConsumesFirstRequest) {
std::atomic<GrpcClientSession*> session_ptr;
ClientAndServer cs(
[session_ptr = &session_ptr](auto, auto) {
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest(kOp1));
EXPECT_THAT(response_q_1->Pop(), Not(IsOk()));
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest(kOp2);
EXPECT_THAT(response_q_2.status(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesAfterServerWritesFirstResponse) {
std::atomic<GrpcClientSession*> session_ptr;
ClientAndServer cs(
[session_ptr = &session_ptr](const IfrtRequest& r,
ServerStream* s) {
IfrtResponse response;
response.mutable_response_metadata()->set_op_id(
r.request_metadata().op_id());
s->Write(response);
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest(kOp1));
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest(kOp2);
response_q_1->Pop().IgnoreError();
if (response_q_2.ok()) {
EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
}
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesDuringServerConstruction) {
std::atomic<GrpcClientSession*> session_ptr;
absl::Notification init_done;
ClientAndServer cs(nullptr,
[session_ptr = &session_ptr,
init_done = &init_done]() {
init_done->WaitForNotification();
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
init_done.Notify();
absl::StatusOr<Queue*> response_q_1 = cs.SendSimpleRequest(kOp1);
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest(kOp2);
if (response_q_1.ok()) {
EXPECT_THAT(response_q_1.value()->Pop(), Not(IsOk()));
}
if (response_q_2.ok()) {
EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
}
ExpectHeadAndTail({response_q_1, response_q_2});
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, MethodsAfterFinishReturnError) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest(kOp1));
cs.client_session()->Finish(TestError());
EXPECT_THAT(cs.SendSimpleRequest(kOp2), Not(IsOk()));
response_q_1->PopAllDuringDestruction();
}
TEST(GrpcClientSessionTest, ReceivingBadIfrtResponseDoesNotCrash) {
ClientAndServer cs(
[](const IfrtRequest& r, ServerStream* s) mutable {
IfrtResponse resp;
resp.mutable_response_metadata()->set_op_id(kOp2);
s->Write(resp);
resp.mutable_response_metadata()->set_op_id(
r.request_metadata().op_id());
s->Write(resp);
return kContinueSession;
});
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q, cs.SendSimpleRequest(kOp1));
EXPECT_THAT(response_q->Pop(), IsOk());
}
TEST(GrpcClientSessionTest, BadInitialChannelFailsPromptly) {
std::string address =
absl::StrCat("localhost:", tsl::testing::PickUnusedPortOrDie());
std::shared_ptr<::grpc::Channel> channel =
::grpc::CreateChannel(address, GetClientCredentials());
std::unique_ptr<grpc::GrpcIfrtService::StubInterface> stub =
grpc::GrpcIfrtService::NewStub(channel);
EXPECT_TRUE(stub != nullptr);
auto session_finished = std::make_shared<Queue>();
auto session = GrpcClientSession::Create(
std::move(stub), Metadata(),
[session_finished](absl::Status s) { session_finished->Push(s); });
EXPECT_THAT(session_finished->Pop(), Not(IsOk()));
}
}
}
}
} |
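The GrpcClientSession row above keys in-flight requests by op id so that responses can be routed back and any remaining callbacks can be failed when the stream terminates. Below is a minimal standalone sketch of that bookkeeping using the standard library instead of absl; the CallbackTable name and the int status-code callback signature are placeholders for this illustration, not part of the dataset row.
#include <cstdint>
#include <functional>
#include <mutex>
#include <optional>
#include <unordered_map>
#include <utility>

class CallbackTable {
 public:
  // The real table stores callbacks taking a StatusOr<IfrtResponse>; an int
  // status code stands in for that here.
  using Callback = std::function<void(int)>;

  // Registers a callback for op_id; returns false if one already exists,
  // where the session above returns an AlreadyExistsError instead.
  bool Add(int64_t op_id, Callback cb) {
    std::lock_guard<std::mutex> lock(mu_);
    return table_.emplace(op_id, std::move(cb)).second;
  }

  // Removes and returns the callback for op_id, if any, so the reader loop
  // can invoke it exactly once.
  std::optional<Callback> Pop(int64_t op_id) {
    std::lock_guard<std::mutex> lock(mu_);
    auto it = table_.find(op_id);
    if (it == table_.end()) return std::nullopt;
    Callback cb = std::move(it->second);
    table_.erase(it);
    return cb;
  }

  // Drains every remaining callback, e.g. to fail them all on termination.
  std::unordered_map<int64_t, Callback> PopAll() {
    std::unordered_map<int64_t, Callback> result;
    std::lock_guard<std::mutex> lock(mu_);
    result.swap(table_);
    return result;
  }

 private:
  std::mutex mu_;
  std::unordered_map<int64_t, Callback> table_;
};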
1,805 | cpp | tensorflow/tensorflow | executable | third_party/xla/xla/backends/interpreter/executable.cc | third_party/xla/xla/python/ifrt_proxy/client/executable_test.cc | #ifndef XLA_BACKENDS_INTERPRETER_EXECUTABLE_H_
#define XLA_BACKENDS_INTERPRETER_EXECUTABLE_H_
#include <memory>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/backends/interpreter/executable_base.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/executable.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_execution_profile.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/service/shaped_buffer.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace interpreter {
class InterpreterExecutable : public InterpreterExecutableBase {
public:
InterpreterExecutable(
std::unique_ptr<HloModule> hlo_module,
std::unique_ptr<HloEvaluator> evaluator,
      std::optional<DynamicDimensionInference> dynamic_dimension_inference);
static int64_t ShapeSizeBytes(const Shape& shape);
protected:
absl::StatusOr<Literal> Evaluate(
const ServiceExecutableRunOptions* run_options,
const HloComputation& computation,
absl::Span<const Literal> arg_literals) override
ABSL_LOCKS_EXCLUDED(evaluator_lock_);
std::unique_ptr<HloEvaluator> evaluator_ ABSL_PT_GUARDED_BY(evaluator_lock_);
mutable absl::Mutex evaluator_lock_;
private:
std::optional<DynamicDimensionInference> dynamic_dimension_inference_;
InterpreterExecutable(const InterpreterExecutable&) = delete;
InterpreterExecutable& operator=(const InterpreterExecutable&) = delete;
};
}
}
#endif
#include "xla/backends/interpreter/executable.h"
#include <algorithm>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/backends/interpreter/executable_base.h"
#include "xla/backends/interpreter/executor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace interpreter {
InterpreterExecutable::InterpreterExecutable(
std::unique_ptr<HloModule> hlo_module,
std::unique_ptr<HloEvaluator> evaluator,
    std::optional<DynamicDimensionInference> dynamic_dimension_inference)
: InterpreterExecutableBase(std::move(hlo_module)),
evaluator_(std::move(evaluator)),
      dynamic_dimension_inference_(std::move(dynamic_dimension_inference)) {
if (dynamic_dimension_inference_.has_value()) {
evaluator_->set_dynamic_dimension_inference(
&dynamic_dimension_inference_.value());
}
}
absl::StatusOr<Literal> InterpreterExecutable::Evaluate(
const ServiceExecutableRunOptions* run_options,
const HloComputation& computation, absl::Span<const Literal> arg_literals) {
absl::MutexLock lock(&evaluator_lock_);
evaluator_->ResetVisitStates();
return evaluator_->Evaluate(computation, arg_literals);
}
int64_t InterpreterExecutable::ShapeSizeBytes(const Shape& shape) {
if (shape.IsOpaque()) {
return sizeof(void*);
}
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
}
} | #include "xla/python/ifrt_proxy/client/executable.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout_util.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/mock.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt_proxy/client/array.h"
#include "xla/python/ifrt_proxy/client/client_session.h"
#include "xla/python/ifrt_proxy/client/host_buffer.h"
#include "xla/python/ifrt_proxy/client/mock_client_session.h"
#include "xla/python/ifrt_proxy/client/mock_host_buffer.h"
#include "xla/python/ifrt_proxy/client/rpc_helper.h"
#include "xla/python/ifrt_proxy/client/version.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Optional;
using ::testing::Pointee;
using ::testing::Return;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
#if defined(PLATFORM_GOOGLE)
using ::testing::EquivToProto;
using ::testing::proto::Partially;
#endif
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
IfrtProxyVersion Version() {
IfrtProxyVersion version;
version.set_protocol_version(kClientMinVersion);
return version;
}
class LoadedExecutableTest : public ::testing::Test {
protected:
void SetUp() override {
session_ = std::make_shared<MockClientSession>();
rpc_helper_ = std::make_shared<RpcHelper>(Version(), session_);
host_buffer_store_ = std::make_shared<MockClientHostBufferStore>();
rpc_helper_->set_host_buffer_store(host_buffer_store_);
EXPECT_CALL(*session_, Enqueue(_))
.WillRepeatedly(Return(Future<ClientSession::Response>(
absl::InternalError("Request has no mock handlers"))));
}
std::shared_ptr<MockClientSession> session_;
std::shared_ptr<RpcHelper> rpc_helper_;
std::shared_ptr<ClientHostBufferStore> host_buffer_store_;
};
#if defined(PLATFORM_GOOGLE)
TEST_F(LoadedExecutableTest, Metadata) {
IfrtResponse response;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
loaded_executable_metadata_response {
parameter_shardings {
shardings { type: REPLICATED }
shardings {
type: OTHER
tile_shape {
element_type: BF16
dimensions: [ 2, 2 ]
}
tile_assignment_dimensions: [ 0, 1 ]
}
}
output_shardings { shardings { type: REPLICATED } }
parameter_layouts_list {
layouts { minor_to_major: 0 }
layouts { minor_to_major: [ 1, 0 ] }
}
output_layouts_list { layouts { minor_to_major: [ 1, 0 ] } }
output_memory_kinds { memory_kind_lists { memory_kinds: [ "foo" ] } }
}
)pb",
&response));
EXPECT_CALL(*session_, Enqueue(Pointee(Partially(EquivToProto(
R"pb(loaded_executable_metadata_request {
loaded_executable_handle: 1234
})pb")))))
.WillOnce(MockClientSessionReturnResponse(response));
MockClient client;
LoadedExecutable executable(
&client, rpc_helper_, 1234, "foo",
2, {},
{}, "fingerprint",
Future<>(absl::OkStatus()),
{}, {});
EXPECT_THAT(
executable.GetParameterShardings(),
Optional(ElementsAre(
EquivToProto(R"pb(type: REPLICATED)pb"),
EquivToProto(R"pb(type: OTHER
tile_shape {
element_type: BF16
dimensions: [ 2, 2 ]
}
tile_assignment_dimensions: [ 0, 1 ])pb"))));
EXPECT_THAT(executable.GetOutputShardings(),
Optional(ElementsAre(EquivToProto(R"pb(type: REPLICATED)pb"))));
ASSERT_OK_AND_ASSIGN(auto parameter_layouts,
executable.GetParameterLayouts());
EXPECT_EQ(parameter_layouts.size(), 2);
EXPECT_EQ(
tensorflow::down_cast<xla::PjRtXlaLayout*>(parameter_layouts[0].get())
->xla_layout(),
xla::LayoutUtil::MakeDescendingLayout(1));
EXPECT_EQ(
tensorflow::down_cast<xla::PjRtXlaLayout*>(parameter_layouts[1].get())
->xla_layout(),
xla::LayoutUtil::MakeDescendingLayout(2));
ASSERT_OK_AND_ASSIGN(auto output_layouts, executable.GetOutputLayouts());
EXPECT_EQ(output_layouts.size(), 1);
EXPECT_EQ(tensorflow::down_cast<xla::PjRtXlaLayout*>(output_layouts[0].get())
->xla_layout(),
xla::LayoutUtil::MakeDescendingLayout(2));
EXPECT_THAT(executable.GetOutputMemoryKinds(),
IsOkAndHolds(ElementsAre(ElementsAre("foo"))));
}
#endif
#if defined(PLATFORM_GOOGLE)
TEST_F(LoadedExecutableTest, Execute) {
MockDevice device;
ON_CALL(device, Id()).WillByDefault(Return(DeviceId(1)));
MockClient client;
ON_CALL(client, LookupDevice(DeviceId(1))).WillByDefault(Return(&device));
  LoadedExecutable executable(
      &client, rpc_helper_, /*handle=*/1234, /*name=*/"foo",
      /*num_devices=*/2, /*addressable_device_logical_ids=*/{},
      /*addressable_devices=*/{}, /*fingerprint=*/"fingerprint",
      /*ready_future=*/Future<>(absl::OkStatus()),
      /*loaded_host_callbacks=*/{}, /*loaded_host_callback_handles=*/{});
IfrtResponse response;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
loaded_executable_execute_response {
status_handle: 2000
outputs {
dtype { kind: KIND_F32 }
shape { dims: [ 4, 4 ] }
array_handle: 3000
}
outputs {
dtype { kind: KIND_F16 }
shape { dims: [ 8 ] }
array_handle: 3001
}
}
)pb",
&response));
{
auto* outputs = response.mutable_loaded_executable_execute_response()
->mutable_outputs();
TF_ASSERT_OK_AND_ASSIGN(
*(*outputs)[0].mutable_sharding(),
SingleDeviceSharding::Create(&device, MemoryKind())->ToProto());
TF_ASSERT_OK_AND_ASSIGN(
*(*outputs)[1].mutable_sharding(),
SingleDeviceSharding::Create(&device, MemoryKind())->ToProto());
}
EXPECT_CALL(*session_, Enqueue(Pointee(Partially(EquivToProto(
R"pb(loaded_executable_execute_request {
loaded_executable_handle: 1234
args_handles: [ 1000, 1001 ]
device_ids: [ 1 ]
})pb")))))
.WillOnce(MockClientSessionReturnResponse(response));
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
response_metadata {
status {
code: 2 # UNKNOWN
message: "injected error"
}
}
)pb",
&response));
EXPECT_CALL(*session_,
Enqueue(Pointee(Partially(EquivToProto(R"pb(check_future_request {
future_handle: 2000
})pb")))))
.WillOnce(MockClientSessionReturnResponse(response));
DeviceList devices({&device});
std::vector<tsl::RCReference<xla::ifrt::Array>> args;
for (const uint64_t handle : {1000, 1001}) {
args.push_back(tsl::MakeRef<Array>(
&client, rpc_helper_, DType(DType::kF32), Shape({2, 2}),
OpaqueSharding::Create(devices, MemoryKind()), ArrayHandle{handle}));
}
TF_ASSERT_OK_AND_ASSIGN(
auto result, executable.Execute(
absl::MakeSpan(args),
xla::ifrt::LoadedExecutable::ExecuteOptions(), devices));
EXPECT_THAT(result.status.Await(),
StatusIs(absl::StatusCode::kUnknown, "injected error"));
ASSERT_THAT(result.outputs, SizeIs(2));
const auto output0 = result.outputs[0];
EXPECT_EQ(output0->dtype(), DType(DType::kF32));
EXPECT_EQ(output0->shape(), Shape({4, 4}));
EXPECT_EQ(llvm::cast<Array>(output0.get())->handle().handle, 3000);
const auto output1 = result.outputs[1];
EXPECT_EQ(output1->dtype(), DType(DType::kF16));
EXPECT_EQ(output1->shape(), Shape({8}));
EXPECT_EQ(llvm::cast<Array>(output1.get())->handle().handle, 3001);
}
#endif
#if defined(PLATFORM_GOOGLE)
TEST_F(LoadedExecutableTest, Delete) {
MockClient client;
  LoadedExecutable executable(
      &client, rpc_helper_, /*handle=*/1234, /*name=*/"foo",
      /*num_devices=*/2, /*addressable_device_logical_ids=*/{},
      /*addressable_devices=*/{}, /*fingerprint=*/"fingerprint",
      /*ready_future=*/Future<>(absl::OkStatus()),
      /*loaded_host_callbacks=*/{}, /*loaded_host_callback_handles=*/{});
{
IfrtResponse response;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
loaded_executable_delete_response { future_handle: 2000 }
)pb",
&response));
EXPECT_CALL(*session_, Enqueue(Pointee(Partially(EquivToProto(
R"pb(loaded_executable_delete_request {
loaded_executable_handle: 1234
})pb")))))
.WillOnce(MockClientSessionReturnResponse(response));
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
response_metadata {
status {
code: 2 # UNKNOWN
message: "injected error"
}
}
)pb",
&response));
EXPECT_CALL(
*session_,
Enqueue(Pointee(Partially(EquivToProto(R"pb(check_future_request {
future_handle: 2000
})pb")))))
.WillOnce(MockClientSessionReturnResponse(response));
Future<> result = executable.Delete();
EXPECT_THAT(result.Await(),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
{
IfrtResponse response;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
loaded_executable_is_deleted_response { is_deleted: true }
)pb",
&response));
EXPECT_CALL(*session_, Enqueue(Pointee(Partially(EquivToProto(
R"pb(loaded_executable_is_deleted_request {
loaded_executable_handle: 1234
})pb")))))
.WillOnce(MockClientSessionReturnResponse(response));
EXPECT_TRUE(executable.IsDeleted());
}
IfrtResponse response;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
loaded_executable_destruct_response {}
)pb",
&response));
EXPECT_CALL(*session_, Enqueue(Pointee(Partially(EquivToProto(
R"pb(loaded_executable_destruct_request {
loaded_executable_handle: 1234
})pb")))))
.WillOnce(MockClientSessionReturnResponse(response));
}
#endif
}
}
}
} |
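// Editorial sketch (added commentary, not part of the dataset row above): the
// tests in this row all stub the proxy transport the same way: parse a canned
// IfrtResponse from a text proto, then expect Enqueue() with a partially
// matched request proto and return the canned response. Names such as
// `session_` are assumed to come from the LoadedExecutableTest fixture.
//
//   IfrtResponse response;
//   ASSERT_TRUE(TextFormat::ParseFromString(
//       R"pb(loaded_executable_is_deleted_response { is_deleted: false })pb",
//       &response));
//   EXPECT_CALL(*session_, Enqueue(Pointee(Partially(EquivToProto(
//                   R"pb(loaded_executable_is_deleted_request {
//                          loaded_executable_handle: 1234
//                        })pb")))))
//       .WillOnce(MockClientSessionReturnResponse(response));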
1,806 | cpp | tensorflow/tensorflow | client | third_party/xla/xla/pjrt/distributed/client.cc | third_party/xla/xla/tests/client_test.cc | #ifndef XLA_CLIENT_CLIENT_H_
#define XLA_CLIENT_CLIENT_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/client/xla_computation.h"
#include "xla/literal.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/service.h"
#include "xla/types.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
namespace xla {
class Client {
public:
explicit Client(Service* stub);
virtual ~Client();
using XlaComputationInstance = xla::XlaComputationInstance;
absl::StatusOr<ExecutionHandle> Compile(
const XlaComputation& computation,
absl::Span<const Shape> argument_shapes,
const ExecutionOptions* execution_options = nullptr);
absl::StatusOr<std::unique_ptr<GlobalData>> Execute(
const ExecutionHandle& handle, absl::Span<GlobalData* const> arguments,
      ExecutionProfile* execution_profile = nullptr);
absl::StatusOr<std::unique_ptr<GlobalData>> Execute(
const XlaComputation& computation,
absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options = nullptr,
ExecutionProfile* execution_profile = nullptr);
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>> ExecuteParallel(
absl::Span<const XlaComputationInstance> computations);
absl::StatusOr<std::vector<DeviceHandle>> GetDeviceHandles(
int64_t device_count);
absl::StatusOr<Literal> Transfer(const GlobalData& data,
const Shape* shape_with_layout = nullptr);
absl::StatusOr<std::unique_ptr<GlobalData>> TransferToServer(
const LiteralSlice& literal, const DeviceHandle* device_handle = nullptr);
absl::Status TransferToInfeed(const LiteralSlice& literal,
int64_t replica_id = 0,
const DeviceHandle* device_handle = nullptr);
absl::StatusOr<Literal> TransferFromOutfeed(
const Shape* shape_with_layout, int64_t replica_id = 0,
const DeviceHandle* device_handle = nullptr);
absl::Status ResetDevice();
absl::StatusOr<Literal> ExecuteAndTransfer(
const XlaComputation& computation,
absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options = nullptr,
ExecutionProfile* execution_profile = nullptr);
absl::StatusOr<Literal> ComputeConstant(
const XlaComputation& computation,
const Layout* output_layout = nullptr) const;
absl::Status Unregister(const GlobalData& data);
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>> DeconstructTuple(
const GlobalData& data);
absl::StatusOr<Shape> GetShape(const GlobalData& data);
absl::StatusOr<std::unique_ptr<ProgramShape>> GetComputationShape(
const XlaComputation& computation);
absl::StatusOr<ChannelHandle> CreateChannelHandle();
absl::StatusOr<ChannelHandle> CreateHostToDeviceChannelHandle();
absl::StatusOr<ChannelHandle> CreateDeviceToHostChannelHandle();
absl::StatusOr<XlaComputation> LoadSnapshot(const HloSnapshot& module);
Service* stub() { return stub_; }
private:
absl::StatusOr<ChannelHandle> CreateChannelHandleByType(
ChannelHandle::ChannelType type);
Service* stub_;
Client(const Client&) = delete;
Client& operator=(const Client&) = delete;
};
}
#endif
#include "xla/client/client.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/client/xla_computation.h"
#include "xla/debug_options_flags.h"
#include "xla/execution_options_util.h"
#include "xla/literal.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
namespace xla {
Client::Client(Service* stub) : stub_(stub) {}
Client::~Client() = default;
absl::StatusOr<Literal> Client::Transfer(const GlobalData& data,
const Shape* shape_with_layout) {
return stub_->TransferToClient(data, shape_with_layout);
}
absl::StatusOr<std::unique_ptr<GlobalData>> Client::TransferToServer(
const LiteralSlice& literal, const DeviceHandle* device_handle) {
return stub_->TransferToServer(literal, device_handle);
}
absl::Status Client::TransferToInfeed(const LiteralSlice& literal,
int64_t replica_id,
const DeviceHandle* device_handle) {
return stub_->TransferToInfeed(literal, replica_id, device_handle);
}
absl::StatusOr<Literal> Client::TransferFromOutfeed(
const Shape* shape_with_layout, int64_t replica_id,
const DeviceHandle* device_handle) {
return stub_->TransferFromOutfeed(shape_with_layout, replica_id,
device_handle);
}
absl::Status Client::ResetDevice() { return stub_->ResetDevice(); }
absl::StatusOr<Literal> Client::ExecuteAndTransfer(
const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<GlobalData> data,
Execute(computation, arguments, execution_options, execution_profile));
std::optional<Shape> shape_with_output_layout;
if (execution_options && execution_options->has_shape_with_output_layout()) {
shape_with_output_layout =
Shape(execution_options->shape_with_output_layout());
}
return Transfer(*data, shape_with_output_layout.has_value()
? &(*shape_with_output_layout)
: nullptr);
}
absl::StatusOr<Literal> Client::ComputeConstant(
const XlaComputation& computation, const Layout* output_layout) const {
return stub_->ComputeConstantGraph(computation, output_layout);
}
absl::StatusOr<XlaComputation> Client::LoadSnapshot(const HloSnapshot& module) {
TF_RET_CHECK(module.has_hlo() && module.hlo().has_hlo_module());
return XlaComputation(module.hlo().hlo_module());
}
absl::StatusOr<ExecutionHandle> Client::Compile(
const XlaComputation& computation, absl::Span<const Shape> argument_shapes,
const ExecutionOptions* execution_options) {
std::optional<ExecutionOptions> opts;
if (!execution_options) {
opts = CreateDefaultExecutionOptions();
}
return stub_->Compile(computation, argument_shapes,
execution_options ? *execution_options : *opts);
}
absl::StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
const ExecutionHandle& handle, absl::Span<GlobalData* const> arguments,
ExecutionProfile* execution_profile) {
return stub_->Execute(handle, arguments, execution_profile);
}
absl::StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
std::optional<ExecutionOptions> options_storage;
if (!execution_options || execution_options->device_handles().empty()) {
if (execution_options) {
options_storage.emplace(*execution_options);
} else {
options_storage.emplace(CreateDefaultExecutionOptions());
}
execution_options = &*options_storage;
TF_ASSIGN_OR_RETURN(auto device_handles,
GetDeviceHandles(1));
TF_RET_CHECK(!device_handles.empty());
*options_storage->add_device_handles() = std::move(device_handles[0]);
}
std::vector<XlaComputationInstance> computation_instances = {
XlaComputationInstance{
computation,
std::vector<GlobalData*>(arguments.begin(), arguments.end()),
*execution_options, execution_profile}};
VLOG(1) << "Making ExecuteParallel request: "
<< execution_options->DebugString();
TF_ASSIGN_OR_RETURN(auto results, ExecuteParallel(computation_instances));
VLOG(1) << "ExecuteParallel request done.";
for (int64_t i = 0, end = results.size(); i < end; i++) {
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(*results[i]));
if (!ShapeUtil::IsEmptyTuple(shape)) {
VLOG(3) << "Fetching result from device " << i << ": "
<< ShapeUtil::HumanString(shape);
return std::move(results[i]);
}
}
TF_RET_CHECK(!results.empty());
VLOG(1) << "Defaulting to device 0 result";
return std::move(results[0]);
}
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>>
Client::ExecuteParallel(absl::Span<const XlaComputationInstance> computations) {
return stub_->ExecuteGraphParallel(computations);
}
absl::StatusOr<std::vector<DeviceHandle>> Client::GetDeviceHandles(
int64_t device_count) {
if (device_count < 1) {
return InvalidArgument("device_count must be greater than 0");
}
return stub_->GetDeviceHandles(device_count);
}
absl::Status Client::Unregister(const GlobalData& data) {
return stub_->Unregister(data.handle());
}
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>>
Client::DeconstructTuple(const GlobalData& data) {
return stub_->DeconstructTuple(data);
}
absl::StatusOr<std::unique_ptr<ProgramShape>> Client::GetComputationShape(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(const auto& result, computation.GetProgramShape());
return std::make_unique<ProgramShape>(result);
}
absl::StatusOr<Shape> Client::GetShape(const GlobalData& data) {
return stub_->GetShape(data);
}
absl::StatusOr<ChannelHandle> Client::CreateChannelHandleByType(
ChannelHandle::ChannelType type) {
return stub_->CreateChannelHandle(type);
}
absl::StatusOr<ChannelHandle> Client::CreateChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_DEVICE);
}
absl::StatusOr<ChannelHandle> Client::CreateHostToDeviceChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::HOST_TO_DEVICE);
}
absl::StatusOr<ChannelHandle> Client::CreateDeviceToHostChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_HOST);
}
} | #include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/client/global_data.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ClientTest : public ClientLibraryTestBase {};
XLA_TEST_F(ClientTest, ExecuteWithLayout) {
XlaBuilder b(TestName());
std::vector<std::vector<int64_t>> layouts = {{0, 1}, {1, 0}};
for (const std::vector<int64_t>& execute_layout : layouts) {
for (const std::vector<int64_t>& transfer_layout : layouts) {
Add(ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}),
ConstantR2<int32_t>(&b, {{10, 20}, {30, 40}}));
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
ExecutionOptions execution_options = execution_options_;
*execution_options.mutable_shape_with_output_layout() =
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
execute_layout)
.ToProto();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> data,
client_->Execute(computation, {}, &execution_options));
Literal expected_literal = LiteralUtil::CreateR2WithLayout<int32_t>(
{{11, 22}, {33, 44}}, LayoutUtil::MakeLayout(transfer_layout));
TF_ASSERT_OK_AND_ASSIGN(
auto computed, client_->Transfer(*data, &expected_literal.shape()));
ASSERT_TRUE(LiteralTestUtil::EqualShapesAndLayouts(
expected_literal.shape(), computed.shape()));
EXPECT_TRUE(LiteralTestUtil::Equal(expected_literal, computed));
}
}
}
XLA_TEST_F(ClientTest, ExecuteWithTupleLayout) {
XlaBuilder b(TestName());
Tuple(&b, {ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}),
ConstantR2<int32_t>(&b, {{10, 20}, {30, 40}})});
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
ExecutionOptions execution_options = execution_options_;
*execution_options.mutable_shape_with_output_layout() =
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{1, 0})})
.ToProto();
TF_ASSERT_OK_AND_ASSIGN(
auto result,
client_->ExecuteAndTransfer(computation, {}, &execution_options));
LiteralTestUtil::ExpectR2Equal<int32_t>({{1, 2}, {3, 4}},
LiteralSlice(result, {0}));
LiteralTestUtil::ExpectR2Equal<int32_t>({{10, 20}, {30, 40}},
LiteralSlice(result, {1}));
EXPECT_TRUE(result.shape().IsTuple());
EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.shape()));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::GetTupleElementShape(result.shape(), 0),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{0, 1})));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::GetTupleElementShape(result.shape(), 1),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{1, 0})));
}
XLA_TEST_F(ClientTest,
DISABLED_ON_INTERPRETER(DISABLED_ON_GPU(ExecuteParallel))) {
XlaComputation add_with_one_arg, mul_with_two_args, dot_with_one_arg;
Shape shape = ShapeUtil::MakeShape(S32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> const_arg,
client_->TransferToServer(
LiteralUtil::CreateR2<int32_t>({{5, 6}, {7, 8}})));
XlaBuilder b(TestName() + ".add");
Add(Parameter(&b, 0, shape, "param_0"),
ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}));
TF_ASSERT_OK_AND_ASSIGN(add_with_one_arg, b.Build());
std::vector<XlaComputationInstance> computation_instances;
TF_ASSERT_OK_AND_ASSIGN(std::vector<xla::DeviceHandle> devices,
client_->GetDeviceHandles(1));
ASSERT_EQ(devices.size(), 1);
ExecutionOptions options = execution_options_;
*options.add_device_handles() = devices[0];
computation_instances.push_back(XlaComputationInstance(
add_with_one_arg, {const_arg.get()}, options, nullptr));
TF_ASSERT_OK_AND_ASSIGN(auto results,
client_->ExecuteParallel(computation_instances));
auto expected_result = LiteralUtil::CreateR2<int32_t>({{6, 8}, {10, 12}});
TF_ASSERT_OK_AND_ASSIGN(
auto result_literal,
client_->Transfer(*results[0], &expected_result.shape()));
EXPECT_TRUE(LiteralTestUtil::Equal(expected_result, result_literal));
}
}
} |
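// Editorial sketch (added commentary, not part of the dataset row above): a
// minimal illustration of the ahead-of-time path exposed by xla::Client in
// this row. Compile() returns an ExecutionHandle that can be re-executed, and
// Transfer() copies the result literal back to the host. The helper below is
// hypothetical and is not code from the repository.
//
//   absl::StatusOr<Literal> CompileAndRun(Client* client,
//                                         const XlaComputation& computation,
//                                         const Shape& argument_shape,
//                                         GlobalData* argument) {
//     TF_ASSIGN_OR_RETURN(ExecutionHandle handle,
//                         client->Compile(computation, {argument_shape}));
//     TF_ASSIGN_OR_RETURN(std::unique_ptr<GlobalData> result,
//                         client->Execute(handle, {argument}));
//     return client->Transfer(*result);
//   }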
1,807 | cpp | tensorflow/tensorflow | grpc_server | third_party/xla/xla/python/ifrt_proxy/server/grpc_server.cc | third_party/xla/xla/python/ifrt_proxy/server/grpc_server_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_SERVER_GRPC_SERVER_H_
#define XLA_PYTHON_IFRT_PROXY_SERVER_GRPC_SERVER_H_
#include <memory>
#include <string>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "grpcpp/server.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
namespace xla {
namespace ifrt {
namespace proxy {
class GrpcServer {
public:
static absl::StatusOr<std::unique_ptr<GrpcServer>> Create(
absl::string_view address,
std::unique_ptr<grpc::GrpcIfrtService::Service> impl);
static absl::StatusOr<std::unique_ptr<GrpcServer>>
CreateFromIfrtClientFactory(
absl::string_view address,
absl::AnyInvocable<absl::StatusOr<std::shared_ptr<xla::ifrt::Client>>()>
backend_ifrt_client_factory);
~GrpcServer();
std::string address() const { return address_; }
void Wait() { server_->Wait(); }
private:
GrpcServer(absl::string_view address,
std::unique_ptr<grpc::GrpcIfrtService::Service> impl,
std::unique_ptr<::grpc::Server> server)
: address_(address), impl_(std::move(impl)), server_(std::move(server)) {}
const std::string address_;
std::unique_ptr<grpc::GrpcIfrtService::Service> impl_;
std::unique_ptr<::grpc::Server> server_;
};
}
}
}
#endif
#include "xla/python/ifrt_proxy/server/grpc_server.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "grpc/grpc.h"
#include "grpcpp/completion_queue.h"
#include "grpcpp/grpcpp.h"
#include "grpcpp/server_builder.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
GrpcServer::~GrpcServer() {
server_->Shutdown();
server_->Wait();
}
absl::StatusOr<std::unique_ptr<GrpcServer>> GrpcServer::Create(
absl::string_view address,
std::unique_ptr<grpc::GrpcIfrtService::Service> impl) {
if (impl == nullptr) {
return absl::InvalidArgumentError(
"Service implementation cannot be a nullptr.");
}
::grpc::ServerBuilder builder;
builder.AddChannelArgument(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, -1);
builder.AddChannelArgument(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, -1);
builder.RegisterService(impl.get());
builder.AddListeningPort(std::string(address), GetServerCredentials());
auto server = builder.BuildAndStart();
if (server == nullptr) {
return absl::UnavailableError(
absl::StrCat("Failed to initialize gRPC server at address:", address));
}
return absl::WrapUnique<GrpcServer>(
new GrpcServer(address, std::move(impl), std::move(server)));
}
absl::StatusOr<std::unique_ptr<GrpcServer>>
GrpcServer::CreateFromIfrtClientFactory(
absl::string_view address,
absl::AnyInvocable<absl::StatusOr<std::shared_ptr<xla::ifrt::Client>>()>
backend_ifrt_client_factory) {
if (backend_ifrt_client_factory == nullptr) {
return absl::InvalidArgumentError(
"backend_ifrt_client_factory cannot be nullptr.");
}
auto service = std::make_unique<GrpcServiceImpl>(
[ifrt_client_factory = std::move(backend_ifrt_client_factory)](
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<HostBufferStore> host_buffer_store) mutable
-> absl::StatusOr<std::unique_ptr<BackendInterface>> {
TF_ASSIGN_OR_RETURN(auto ifrt_client, ifrt_client_factory());
return IfrtBackend::Create(version, session_id, std::move(ifrt_client),
std::move(host_buffer_store));
});
return Create(address, std::move(service));
}
}
}
} | #include "xla/python/ifrt_proxy/server/grpc_server.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Not;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
class FakeIfrtService : public grpc::GrpcIfrtService::Service {};
TEST(GrpcServerTest, CreationTest) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
ASSERT_THAT(GrpcServer::Create(addr, std::move(grpc_service_impl)), IsOk());
}
TEST(GrpcServerTest, CreationFailsIfImplIsNullptr) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
EXPECT_THAT(GrpcServer::Create(addr, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(GrpcServerTest, CreationFailsWithInvalidAddress) {
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
EXPECT_THAT(GrpcServer::Create("invalid-address",
std::move(grpc_service_impl)),
Not(IsOk()));
}
TEST(GrpcServerTest, RetrievingServerAddressWorks) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
TF_ASSERT_OK_AND_ASSIGN(
auto grpc_server, GrpcServer::Create(addr, std::move(grpc_service_impl)));
EXPECT_EQ(grpc_server->address(), addr);
}
}
}
}
} |
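// Editorial sketch (added commentary, not part of the dataset row above): how
// the GrpcServer in this row might be stood up. `CreateBackendClient()` is a
// hypothetical factory returning
// absl::StatusOr<std::shared_ptr<xla::ifrt::Client>> and is not part of the
// repository code shown here.
//
//   absl::Status RunProxyServer(absl::string_view address) {
//     TF_ASSIGN_OR_RETURN(std::unique_ptr<GrpcServer> server,
//                         GrpcServer::CreateFromIfrtClientFactory(
//                             address, []() { return CreateBackendClient(); }));
//     LOG(INFO) << "IFRT proxy server listening on " << server->address();
//     server->Wait();  // Blocks until the server is shut down.
//     return absl::OkStatus();
//   }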
1,808 | cpp | tensorflow/tensorflow | version | third_party/xla/xla/python/ifrt_proxy/server/version.cc | third_party/xla/xla/python/ifrt_proxy/server/version_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_SERVER_VERSION_H_
#define XLA_PYTHON_IFRT_PROXY_SERVER_VERSION_H_
#include "absl/status/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
inline constexpr int kServerMinVersion = 1;
inline constexpr int kServerMaxVersion = 3;
absl::StatusOr<int> ChooseVersion(int client_min_version,
int client_max_version,
int server_min_version = kServerMinVersion,
int server_max_version = kServerMaxVersion);
}
}
}
#endif
#include "xla/python/ifrt_proxy/server/version.h"
#include <algorithm>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
namespace xla {
namespace ifrt {
namespace proxy {
absl::StatusOr<int> ChooseVersion(int client_min_version,
int client_max_version,
int server_min_version,
int server_max_version) {
const int version = std::min(server_max_version, client_max_version);
if (version < server_min_version || version < client_min_version) {
return absl::InvalidArgumentError(absl::StrCat(
"IFRT Proxy client and server failed to agree on the "
"protocol version; supported versions: client = [",
client_min_version, ", ", client_max_version, "], server = [",
server_min_version, ", ", server_max_version, "]"));
}
return version;
}
}
}
} | #include "xla/python/ifrt_proxy/server/version.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
struct Param {
int client_min_version;
int client_max_version;
int server_min_version;
int server_max_version;
};
class CompatibleVersionTest : public ::testing::TestWithParam<Param> {};
TEST_P(CompatibleVersionTest, Verify) {
const Param& param = GetParam();
EXPECT_THAT(ChooseVersion(param.client_min_version, param.client_max_version,
param.server_min_version, param.server_max_version),
IsOk());
}
INSTANTIATE_TEST_SUITE_P(CompatibleVersionTest, CompatibleVersionTest,
::testing::Values(Param{1, 1, 1, 1}, Param{1, 2, 2, 2},
Param{2, 2, 1, 2},
Param{1, 3, 3, 4}));
class IncompatibleVersionTest : public ::testing::TestWithParam<Param> {};
TEST_P(IncompatibleVersionTest, Verify) {
const Param& param = GetParam();
EXPECT_THAT(ChooseVersion(param.client_min_version, param.client_max_version,
param.server_min_version, param.server_max_version),
StatusIs(absl::StatusCode::kInvalidArgument));
}
INSTANTIATE_TEST_SUITE_P(IncompatibleVersionTest, IncompatibleVersionTest,
::testing::Values(Param{1, 2, 3, 3}, Param{1, 3, 4, 6},
Param{1, 1, 2, 2}));
}
}
}
} |
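// Editorial note (added commentary, not part of the dataset row above):
// ChooseVersion() in this row agrees on min(client_max, server_max) and fails
// if that value is below either side's minimum. With the defaults
// kServerMinVersion = 1 and kServerMaxVersion = 3:
//
//   absl::StatusOr<int> ok = ChooseVersion(/*client_min_version=*/2,
//                                          /*client_max_version=*/5);
//   // ok.value() == 3: min(3, 5) = 3, which satisfies both minimums.
//
//   absl::StatusOr<int> bad = ChooseVersion(/*client_min_version=*/4,
//                                           /*client_max_version=*/6);
//   // min(3, 6) = 3 is below the client minimum of 4, so this returns an
//   // InvalidArgument error.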
1,809 | cpp | tensorflow/tensorflow | ifrt_backend | third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend.cc | third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_SERVER_IFRT_BACKEND_H_
#define XLA_PYTHON_IFRT_PROXY_SERVER_IFRT_BACKEND_H_
#include <cstdint>
#include <functional>
#include <memory>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/host_callback.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace ifrt {
namespace proxy {
class BackendInterface {
public:
virtual ~BackendInterface() = default;
using Response = std::shared_ptr<IfrtResponse>;
virtual Future<Response> Process(std::unique_ptr<IfrtRequest> request) = 0;
};
class IfrtBackend final : public BackendInterface {
public:
static absl::StatusOr<std::unique_ptr<IfrtBackend>> Create(
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store);
~IfrtBackend() override;
const IfrtProxyVersion& version() const { return version_; }
Future<Response> Process(std::unique_ptr<IfrtRequest> request) override;
private:
class HandleGenerator {
public:
uint64_t New();
void BulkNew(absl::Span<uint64_t> handles);
private:
absl::Mutex mu_;
uint64_t current_ ABSL_GUARDED_BY(mu_) = 1;
};
IfrtBackend(IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store);
Future<Response> AsyncExecute(
std::function<absl::StatusOr<Response>()> handle_fn,
tsl::thread::ThreadPool* thread_pool = nullptr);
absl::StatusOr<Response> HandleInit(std::unique_ptr<IfrtRequest> request);
Future<Response> HandleCheckFutureRequest(
std::unique_ptr<IfrtRequest> request);
Future<Response> HandleCheckValueReadyRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleMakeArrayFromHostBufferRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleAssembleArrayFromSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleRemapArraysRequest(
std::unique_ptr<IfrtRequest> request);
Future<Response> HandleCopyToHostBufferRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleDisassembleIntoSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleCopyArraysRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleReshardRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleFullyReplicatedShardRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleDeleteArrayRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleIsArrayDeletedRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleDestructArrayRequest(
std::unique_ptr<IfrtRequest> request);
Future<Response> HandleCompileRequest(std::unique_ptr<IfrtRequest> request);
Future<Response> HandleLoadedExecutableMetadataRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleLoadedExecutableExecuteRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleLoadedExecutableDeleteRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleLoadedExecutableIsDeletedRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleLoadedExecutableDestructRequest(
std::unique_ptr<IfrtRequest> request);
Future<Response> HandleLoadedHostCallbackPollRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleLoadedHostCallbackReturnRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<Response> HandleGetDefaultDeviceAssignmentRequest(
std::unique_ptr<IfrtRequest> request);
absl::StatusOr<std::shared_ptr<xla::ifrt::LoadedExecutable>>
GetLoadedExecutable(uint64_t handle);
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> GetArray(uint64_t handle);
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> GetArrayLocked(
uint64_t handle) ABSL_SHARED_LOCKS_REQUIRED(arrays_mutex_);
HandleGenerator handle_generator_;
const IfrtProxyVersion version_;
const uint64_t session_id_;
const std::shared_ptr<xla::ifrt::Client> client_;
const std::shared_ptr<HostBufferStore> host_buffer_store_;
absl::Mutex futures_mutex_;
absl::flat_hash_map<uint64_t, Future<>> futures_
ABSL_GUARDED_BY(futures_mutex_);
absl::Mutex arrays_mutex_;
absl::flat_hash_map<uint64_t, tsl::RCReference<xla::ifrt::Array>> arrays_
ABSL_GUARDED_BY(arrays_mutex_);
absl::Mutex executables_mutex_;
absl::flat_hash_map<uint64_t, std::shared_ptr<xla::ifrt::LoadedExecutable>>
executables_ ABSL_GUARDED_BY(executables_mutex_);
absl::Mutex host_callback_queues_mutex_;
absl::flat_hash_map<uint64_t, std::shared_ptr<RemoteLoadedHostCallbackQueue>>
host_callback_queues_ ABSL_GUARDED_BY(host_callback_queues_mutex_);
absl::Mutex host_callback_executions_mutex_;
absl::flat_hash_map<uint64_t, RemoteLoadedHostCallbackQueue::ExecutionRequest>
host_callback_executions_
ABSL_GUARDED_BY(host_callback_executions_mutex_);
absl::Mutex in_flight_count_mutex_;
int64_t in_flight_count_ ABSL_GUARDED_BY(in_flight_count_mutex_) = 0;
tsl::thread::ThreadPool compile_thread_pool_;
};
}
}
}
#endif
#include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/program_serdes.h"
#include "xla/python/ifrt/remap_plan.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/value.h"
#include "xla/python/ifrt_proxy/common/array_util.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/proto_util.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/host_callback.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace ifrt {
namespace proxy {
IfrtBackend::IfrtBackend(IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store)
: version_(std::move(version)),
session_id_(session_id),
client_(std::move(ifrt_client)),
host_buffer_store_(std::move(host_buffer_store)),
compile_thread_pool_(
tsl::Env::Default(),
[]() {
tsl::ThreadOptions options;
options.stack_size = 240 * 1024;
return options;
}(),
"IfrtBackend",
32) {}
absl::StatusOr<std::unique_ptr<IfrtBackend>> IfrtBackend::Create(
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store) {
if (ifrt_client == nullptr) {
return absl::InvalidArgumentError("ifrt_client cannot be a nullptr.");
}
if (version.protocol_version() < kServerMinVersion ||
version.protocol_version() > kServerMaxVersion) {
return absl::FailedPreconditionError(absl::StrCat(
"Protocol version ", version.protocol_version(),
" is unsupported by IFRT Proxy server; supported versions: [",
kServerMinVersion, ",", kServerMaxVersion, "]"));
}
return absl::WrapUnique<IfrtBackend>(
new IfrtBackend(std::move(version), session_id, std::move(ifrt_client),
std::move(host_buffer_store)));
}
IfrtBackend::~IfrtBackend() {
{
absl::MutexLock lock(&host_callback_queues_mutex_);
for (const auto& [key, queue] : host_callback_queues_) {
queue->Close();
}
}
absl::flat_hash_map<uint64_t, RemoteLoadedHostCallbackQueue::ExecutionRequest>
host_callback_executions;
{
absl::MutexLock lock(&host_callback_executions_mutex_);
host_callback_executions.swap(host_callback_executions_);
}
for (auto& [handle, execution_request] : host_callback_executions) {
std::move(execution_request)
.status.Set(absl::CancelledError("IFRT backend has shut down"));
}
{
auto done = [this]() ABSL_SHARED_LOCKS_REQUIRED(in_flight_count_mutex_) {
return in_flight_count_ == 0;
};
absl::MutexLock lock(&in_flight_count_mutex_, absl::Condition(&done));
}
}
Future<BackendInterface::Response> IfrtBackend::Process(
std::unique_ptr<IfrtRequest> request) {
switch (request->request_case()) {
case IfrtRequest::RequestCase::kInitRequest:
return Future<Response>(HandleInit(std::move(request)));
case IfrtRequest::RequestCase::kCheckFutureRequest:
return HandleCheckFutureRequest(std::move(request));
case IfrtRequest::RequestCase::kMakeArrayFromHostBufferRequest:
return Future<Response>(
HandleMakeArrayFromHostBufferRequest(std::move(request)));
case IfrtRequest::RequestCase::kAssembleArrayFromSingleDeviceArraysRequest:
return Future<Response>(
HandleAssembleArrayFromSingleDeviceArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kRemapArraysRequest:
return Future<Response>(HandleRemapArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kCopyToHostBufferRequest:
return HandleCopyToHostBufferRequest(std::move(request));
case IfrtRequest::RequestCase::kDisassembleIntoSingleDeviceArraysRequest:
return Future<Response>(
HandleDisassembleIntoSingleDeviceArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kCheckValueReadyRequest:
return Future<Response>(HandleCheckValueReadyRequest(std::move(request)));
case IfrtRequest::RequestCase::kCopyArraysRequest:
return Future<Response>(HandleCopyArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kReshardRequest:
return Future<Response>(HandleReshardRequest(std::move(request)));
case IfrtRequest::RequestCase::kFullyReplicatedShardRequest:
return Future<Response>(
HandleFullyReplicatedShardRequest(std::move(request)));
case IfrtRequest::RequestCase::kDeleteArrayRequest:
return Future<Response>(HandleDeleteArrayRequest(std::move(request)));
case IfrtRequest::RequestCase::kIsArrayDeletedRequest:
return Future<Response>(HandleIsArrayDeletedRequest(std::move(request)));
case IfrtRequest::RequestCase::kDestructArrayRequest:
return Future<Response>(HandleDestructArrayRequest(std::move(request)));
case IfrtRequest::RequestCase::kCompileRequest:
return Future<Response>(HandleCompileRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableMetadataRequest:
return HandleLoadedExecutableMetadataRequest(std::move(request));
case IfrtRequest::RequestCase::kLoadedExecutableExecuteRequest:
return Future<Response>(
HandleLoadedExecutableExecuteRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableDeleteRequest:
return Future<Response>(
HandleLoadedExecutableDeleteRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableIsDeletedRequest:
return Future<Response>(
HandleLoadedExecutableIsDeletedRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableDestructRequest:
return Future<Response>(
HandleLoadedExecutableDestructRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedHostCallbackPollRequest:
return HandleLoadedHostCallbackPollRequest(std::move(request));
case IfrtRequest::RequestCase::kLoadedHostCallbackReturnRequest:
return Future<Response>(
HandleLoadedHostCallbackReturnRequest(std::move(request)));
case IfrtRequest::RequestCase::kGetDefaultDeviceAssignmentRequest:
return Future<Response>(
HandleGetDefaultDeviceAssignmentRequest(std::move(request)));
default:
return Future<Response>(absl::UnimplementedError(absl::StrCat(
"Got unimplemented request type: ", request->request_case())));
}
}
uint64_t IfrtBackend::HandleGenerator::New() {
absl::MutexLock lock(&mu_);
return current_++;
}
void IfrtBackend::HandleGenerator::BulkNew(absl::Span<uint64_t> handles) {
absl::MutexLock lock(&mu_);
std::iota(handles.begin(), handles.end(), current_);
current_ += handles.size();
}
Future<BackendInterface::Response> IfrtBackend::AsyncExecute(
std::function<absl::StatusOr<Response>()> handle_fn,
tsl::thread::ThreadPool* thread_pool) {
{
absl::MutexLock lock(&in_flight_count_mutex_);
++in_flight_count_;
}
auto promise = Future<Response>::CreatePromise();
auto f = [this, promise, handle_fn = std::move(handle_fn)]() mutable {
promise.Set(handle_fn());
{
absl::MutexLock lock(&in_flight_count_mutex_);
--in_flight_count_;
}
};
if (thread_pool != nullptr) {
thread_pool->Schedule(std::move(f));
} else {
tsl::Env::Default()->SchedClosure(std::move(f));
}
return Future<Response>(std::move(promise));
}
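// Editorial note (added commentary, not original source): AsyncExecute pairs
// every scheduled handler with the in-flight accounting above. The counter is
// incremented before the closure is scheduled and decremented after the
// promise is set, which is what lets ~IfrtBackend() block on
// `in_flight_count_ == 0` until all pending handlers have drained.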
absl::StatusOr<BackendInterface::Response> IfrtBackend::HandleInit(
std::unique_ptr<IfrtRequest> request) {
std::unique_ptr<IfrtResponse> response =
NewIfrtResponse(request->request_metadata().op_id());
auto* init_resp = response->mutable_init_response();
init_resp->set_session_id(session_id_);
init_resp->set_platform_name(AsProtoStringData(client_->platform_name()));
init_resp->set_platform_version(
AsProtoStringData(client_->platform_version()));
init_resp->set_platform_id(client_->platform_id());
init_resp->set_runtime_type(AsProtoStringData(client_->runtime_type()));
init_resp->set_process_index(client_->process_index());
for (auto* device : client_->devices()) {
InitResponse::Device* d = init_resp->add_devices();
d->set_id(device->Id().value());
d->set_device_kind(AsProtoStringData(device->Kind()));
if (auto default_memory = device->DefaultMemory(); default_memory.ok()) {
d->set_default_memory_id((*default_memory)->Id().value());
}
for (const auto* memory : device->Memories()) {
d->add_memory_ids(memory->Id().value());
}
d->set_debug_string(AsProtoStringData(device->DebugString()));
d->set_to_string(AsProtoStringData(device->ToString()));
for (const auto& [name, attr] : device->Attributes()) {
TF_ASSIGN_OR_RETURN((*d->mutable_attributes())[name],
ToVariantProto(attr));
}
}
for (auto* addressable_device : client_->addressable_devices()) {
init_resp->add_addressable_device_ids(addressable_device->Id().value());
}
absl::flat_hash_map<int, xla::ifrt::Memory*> memories;
for (auto* device : client_->devices()) {
for (xla::ifrt::Memory* memory : device->Memories()) {
const auto [it, inserted] =
memories.insert({memory->Id().value(), memory});
if (!inserted && it->second != memory) {
return absl::FailedPreconditionError(absl::StrCat(
"Two memories cannot have the same id: ", memory->ToString(),
" vs. ", it->second->ToString()));
}
}
}
for (const auto& [id, memory] : memories) {
auto* m = init_resp->add_memories();
m->set_id(id);
m->set_memory_space_kind(AsProtoStringData(*memory->Kind().memory_kind()));
for (const auto* device : memory->Devices()) {
m->add_device_ids(device->Id().value());
}
m->set_debug_string(AsProtoStringData(memory->DebugString()));
m->set_to_string(AsProtoStringData(memory->ToString()));
}
return response;
}
Future<BackendInterface::Response> IfrtBackend::HandleCheckFutureRequest(
std::unique_ptr<IfrtRequest> request) {
const CheckFutureRequest& check_request = request->check_future_request();
Future<> future;
{
absl::MutexLock lock(&futures_mutex_);
const auto it = futures_.find(check_request.future_handle());
if (it == futures_.end()) {
return Future<Response>(absl::NotFoundError(absl::StrCat(
"Unknown future handle: ", check_request.future_handle())));
}
future = std::move(it->second);
futures_.erase(it);
}
auto promise = Future<BackendInterface::Response>::CreatePromise();
future.OnReady([op_id = request->request_metadata().op_id(), promise,
hold = future](absl::Status status) mutable {
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
auto ifrt_resp = NewIfrtResponse(op_id);
ifrt_resp->mutable_check_future_response();
promise.Set(std::move(ifrt_resp));
});
return Future<BackendInterface::Response>(std::move(promise));
}
Future<BackendInterface::Response> IfrtBackend::HandleCheckValueReadyRequest(
std::unique_ptr<IfrtRequest> request) {
std::vector<tsl::RCReference<xla::ifrt::Value>> values;
values.reserve(request->check_value_ready_request().value_handles_size());
for (const auto& value_handle :
request->check_value_ready_request().value_handles()) {
auto array = GetArray(value_handle);
if (!array.ok()) {
return Future<Response>(array.status());
}
values.push_back(*std::move(array));
}
auto ifrt_response_promise =
Future<BackendInterface::Response>::CreatePromise();
Future<BackendInterface::Response> ifrt_response_future(
ifrt_response_promise);
client_->GetReadyFuture(values).OnReady(
[op_id = request->request_metadata().op_id(),
promise = std::move(ifrt_response_promise)](
absl::Status status) mutable -> void {
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
auto ifrt_response = NewIfrtResponse(op_id);
ifrt_response->mutable_check_value_ready_response();
promise.Set(std::move(ifrt_response));
});
return ifrt_response_future;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleMakeArrayFromHostBufferRequest(
std::unique_ptr<IfrtRequest> request) {
if (!request->has_make_array_from_host_buffer_request()) {
return absl::InternalError(
"MakeArrayFromHostBuffer got an IfrtRequest with no "
"MakeArrayFromHostBufferRequest in it.");
}
auto* make_array_request =
request->mutable_make_array_from_host_buffer_request();
TF_ASSIGN_OR_RETURN(
auto sharding, Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
make_array_request->sharding()));
const auto byte_strides = [&]() -> std::optional<std::vector<int64_t>> {
if (!make_array_request->has_byte_strides()) return std::nullopt;
return FromByteStridesProto(make_array_request->byte_strides());
}();
TF_ASSIGN_OR_RETURN(const auto shape,
Shape::FromProto(make_array_request->shape()));
TF_ASSIGN_OR_RETURN(const auto dtype,
DType::FromProto(make_array_request->dtype()));
const uint64_t host_buffer_handle = make_array_request->host_buffer_handle();
absl::Cleanup cleanup = [&] {
CHECK_OK(host_buffer_store_->Delete(host_buffer_handle));
};
TF_ASSIGN_OR_RETURN(std::shared_ptr<const std::string> host_buffer,
host_buffer_store_->Lookup(host_buffer_handle));
std::move(cleanup).Invoke();
TF_ASSIGN_OR_RETURN(const auto mem_region,
ArrayMemRegion::FromMinimalMemRegion(
*host_buffer, dtype, shape, byte_strides));
TF_ASSIGN_OR_RETURN(
auto array,
client_->MakeArrayFromHostBuffer(
mem_region.zeroth_element(), dtype, std::move(shape),
std::move(byte_strides), std::move(sharding),
xla::ifrt::Client::HostBufferSemantics::
kImmutableUntilTransferCompletes,
[hold = std::move(host_buffer)]() mutable { hold.reset(); }));
uint64_t handle = handle_generator_.New();
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({handle, std::move(array)});
}
std::unique_ptr<IfrtResponse> response =
NewIfrtResponse(request->request_metadata().op_id());
auto* make_array_resp =
response->mutable_make_array_from_host_buffer_response();
make_array_resp->set_array_handle(handle);
return response;
}
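// Editorial note (added commentary, not original source): the handler above
// assumes the client has already uploaded the literal bytes to the
// HostBufferStore under `host_buffer_handle` before sending
// MakeArrayFromHostBufferRequest. The server looks the buffer up, deletes the
// store entry, wraps the bytes in an ArrayMemRegion, builds the array, and
// returns a fresh server-side `array_handle` that later array requests refer
// to.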
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleAssembleArrayFromSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& assemble_request =
request->assemble_array_from_single_device_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle :
assemble_request.single_device_array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(Shape shape, Shape::FromProto(assemble_request.shape()));
TF_ASSIGN_OR_RETURN(
auto sharding, Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
assemble_request.sharding()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
assemble_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(auto array, client_->AssembleArrayFromSingleDeviceArrays(
std::move(shape), std::move(sharding),
absl::MakeSpan(arrays), semantics));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
uint64_t handle = handle_generator_.New();
ifrt_resp->mutable_assemble_array_from_single_device_arrays_response()
->set_array_handle(handle);
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({handle, std::move(array)});
}
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleRemapArraysRequest(std::unique_ptr<IfrtRequest> request) {
const auto& remap_request = request->remap_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle : remap_request.array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(
RemapPlan plan,
RemapPlan::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
remap_request.plan()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
remap_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(
auto out_arrays,
client_->RemapArrays(plan, absl::MakeSpan(arrays), semantics));
int64_t num_arrays = out_arrays.size();
auto response = NewIfrtResponse(request->request_metadata().op_id());
auto* handles =
response->mutable_remap_arrays_response()->mutable_array_handles();
handles->Reserve(num_arrays);
uint64_t* handles_buf = handles->AddNAlreadyReserved(num_arrays);
handle_generator_.BulkNew(absl::MakeSpan(handles_buf, num_arrays));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < num_arrays; ++i) {
arrays_.insert({handles_buf[i], out_arrays[i]});
}
}
return response;
}
Future<BackendInterface::Response> IfrtBackend::HandleCopyToHostBufferRequest(
std::unique_ptr<IfrtRequest> request) {
const CopyToHostBufferRequest& copy_to_host =
request->copy_to_host_buffer_request();
auto array = GetArray(copy_to_host.array_handle());
if (!array.ok()) {
return Future<Response>(array.status());
}
std::optional<int> element_size = (*array)->dtype().byte_size();
if (element_size == std::nullopt) {
return Future<Response>(
absl::InternalError("Array element size is unknown."));
}
int64_t host_buffer_size =
(*array)->shape().num_elements() * element_size.value();
auto host_buffer = std::make_unique<std::string>();
host_buffer->resize(host_buffer_size);
const auto byte_strides = [&]() -> std::optional<std::vector<int64_t>> {
if (!copy_to_host.has_byte_strides()) {
return std::nullopt;
}
return FromByteStridesProto(copy_to_host.byte_strides());
}();
const auto mem_region = ArrayMemRegion::FromMinimalMemRegion(
absl::string_view(*host_buffer), (*array)->dtype(), (*array)->shape(),
byte_strides);
if (!mem_region.ok()) {
return Future<Response>(mem_region.status());
}
Future<> copy_status =
(*array)->CopyToHostBuffer(mem_region->zeroth_element(), byte_strides,
ArrayCopySemantics::kAlwaysCopy);
auto resp_promise = Future<BackendInterface::Response>::CreatePromise();
Future<BackendInterface::Response> resp_future(resp_promise);
auto on_ready = [this, op_id = request->request_metadata().op_id(),
host_buffer = std::move(host_buffer),
host_buffer_handle = copy_to_host.host_buffer_handle()](
absl::Status status) mutable
-> absl::StatusOr<std::unique_ptr<IfrtResponse>> {
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(
host_buffer_store_->Store(host_buffer_handle, *std::move(host_buffer)));
std::unique_ptr<IfrtResponse> response = NewIfrtResponse(op_id);
response->mutable_copy_to_host_buffer_response();
return response;
};
copy_status.OnReady(
[promise = std::move(resp_promise), on_ready = std::move(on_ready)](
absl::Status status) mutable { promise.Set(on_ready(status)); });
return resp_future;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleDisassembleIntoSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request) {
TF_ASSIGN_OR_RETURN(
auto array,
GetArray(request->disassemble_into_single_device_arrays_request()
.array_handle()));
TF_ASSIGN_OR_RETURN(auto single_device_arrays,
array->DisassembleIntoSingleDeviceArrays(
xla::ifrt::ArrayCopySemantic | #include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include <sys/types.h>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/mock.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/host_callback.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
#include "tsl/protobuf/status.pb.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::_;
using ::testing::ByMove;
using ::testing::DoAll;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::Invoke;
using ::testing::Not;
using ::testing::NotNull;
using ::testing::Optional;
using ::testing::Pointee;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
#if defined(PLATFORM_GOOGLE)
using ::testing::EquivToProto;
using ::testing::proto::IgnoringRepeatedFieldOrdering;
using ::testing::proto::Partially;
#endif
constexpr uint64_t kSessionId = 12345;
class IfrtBackendTest
: public ::testing::TestWithParam<int> {
protected:
IfrtProxyVersion Version() {
IfrtProxyVersion version;
version.set_protocol_version(GetParam());
return version;
}
};
std::unique_ptr<IfrtRequest> NewIfrtRequest(uint64_t op_id) {
auto ifrt_request = std::make_unique<IfrtRequest>();
auto* request_metadata = ifrt_request->mutable_request_metadata();
request_metadata->set_op_id(op_id);
return ifrt_request;
}
TEST_P(IfrtBackendTest, CreationFailsWithNullIfrtClient) {
EXPECT_THAT(IfrtBackend::Create(Version(), kSessionId, nullptr, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_P(IfrtBackendTest, SuccessfulCreation) {
auto ifrt_client = std::make_unique<MockClient>();
ASSERT_THAT(IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()),
IsOk());
}
TEST_P(IfrtBackendTest, ShutdownSucceeds) {
auto ifrt_client = std::make_unique<MockClient>();
TF_ASSERT_OK_AND_ASSIGN(
auto ifrt_backend,
IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()));
}
TEST_P(IfrtBackendTest, ProcessFailsWithNoRequestSet) {
auto ifrt_client = std::make_unique<MockClient>();
TF_ASSERT_OK_AND_ASSIGN(
auto ifrt_backend,
IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()));
auto request = std::make_unique<IfrtRequest>();
auto process_status = ifrt_backend->Process(std::move(request)).Await();
ASSERT_THAT(process_status, Not(IsOk()));
}
INSTANTIATE_TEST_SUITE_P(
IfrtBackendTestWithAllVersions, IfrtBackendTest,
testing::Range(kServerMinVersion, kServerMaxVersion + 1),
[](const testing::TestParamInfo<IfrtBackendTest::ParamType>& info) {
return absl::StrCat(info.param);
});
struct TestProgram : llvm::RTTIExtends<TestProgram, Program> {
static char ID;
};
[[maybe_unused]] char TestProgram::ID = 0;
class TestProgramSerDes : public llvm::RTTIExtends<TestProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::proxy::TestProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
CHECK(llvm::isa<TestProgram>(serializable));
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
return std::make_unique<TestProgram>();
}
static char ID;
};
[[maybe_unused]] char TestProgramSerDes::ID = 0;
struct TestCompileOptions
: llvm::RTTIExtends<TestCompileOptions, CompileOptions> {
static char ID;
};
[[maybe_unused]] char TestCompileOptions::ID = 0;
class TestCompileOptionsSerDes
: public llvm::RTTIExtends<TestCompileOptionsSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::proxy::TestCompileOptions";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
CHECK(llvm::isa<TestCompileOptions>(serializable));
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
return std::make_unique<TestCompileOptions>();
}
static char ID;
};
[[maybe_unused]] char TestCompileOptionsSerDes::ID = 0;
class IfrtBackendHandlerTest : public IfrtBackendTest {
protected:
static void SetUpTestSuite() {
RegisterSerDes<TestProgram>(std::make_unique<TestProgramSerDes>());
RegisterSerDes<TestCompileOptions>(
std::make_unique<TestCompileOptionsSerDes>());
}
void SetUp() override {
auto mock_client = std::make_unique<xla::ifrt::MockClient>();
std::vector<xla::ifrt::Device*> raw_device_ptrs;
for (int i = 0; i < 2; ++i) {
auto mock_device = std::make_unique<xla::ifrt::MockDevice>();
ON_CALL(*mock_device, Id()).WillByDefault(Return(DeviceId(i)));
raw_device_ptrs.push_back(mock_device.get());
mock_devices_.push_back(std::move(mock_device));
}
ON_CALL(*mock_client, devices()).WillByDefault(Return(raw_device_ptrs));
ON_CALL(*mock_client, LookupDevice(_))
.WillByDefault(
Invoke([this](DeviceId id) -> absl::StatusOr<xla::ifrt::Device*> {
if (id.value() < 0 || id.value() >= mock_devices_.size()) {
return absl::NotFoundError(
absl::StrCat("Unknown device id: ", id.value()));
}
return mock_devices_[id.value()].get();
}));
mock_client_ = mock_client.get();
EXPECT_CALL(*mock_client_, GetDefaultCompiler)
.WillRepeatedly(Return(&mock_compiler_));
host_buffer_store_ = std::make_shared<HostBufferStore>();
TF_ASSERT_OK_AND_ASSIGN(
backend_,
IfrtBackend::Create(Version(), kSessionId, std::move(mock_client),
host_buffer_store_));
}
absl::StatusOr<std::shared_ptr<IfrtResponse>> CallBackend(
std::unique_ptr<IfrtRequest> request) {
auto response_future = backend_->Process(std::move(request));
return std::move(response_future).Await();
}
uint64_t NewOpId() {
absl::MutexLock lock(&mu_);
return current_op_id_++;
}
uint64_t NewHostBufferHandle() { return current_host_buffer_handle_++; }
absl::StatusOr<uint64_t> MakeTestArray(tsl::RCReference<Array> mock_array) {
EXPECT_CALL(*mock_client_, MakeArrayFromHostBuffer(_, _, _, _, _, _, _))
.WillOnce(Return(std::move(mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
{
const uint64_t host_buffer_handle = NewHostBufferHandle();
TF_RETURN_IF_ERROR(
host_buffer_store_->Store(host_buffer_handle, "01234567"));
auto* make_array =
ifrt_request->mutable_make_array_from_host_buffer_request();
make_array->mutable_dtype()->set_kind(DTypeProto::KIND_S32);
make_array->mutable_shape()->add_dims(2);
make_array->set_host_buffer_handle(host_buffer_handle);
TF_ASSIGN_OR_RETURN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSIGN_OR_RETURN(
*make_array->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
TF_ASSIGN_OR_RETURN(auto make_array_response,
CallBackend(std::move(ifrt_request)));
TF_RETURN_IF_ERROR(tsl::StatusFromProto(
make_array_response->response_metadata().status()));
return make_array_response->make_array_from_host_buffer_response()
.array_handle();
}
absl::StatusOr<CompileResponse> CompileTestLoadedExecutable(
absl::StatusOr<std::unique_ptr<LoadedExecutable>> loaded_executable) {
auto request = NewIfrtRequest(NewOpId());
CompileRequest* compile_request = request->mutable_compile_request();
TestProgram program;
TF_ASSIGN_OR_RETURN(*compile_request->mutable_program(),
Serialize(program));
TestCompileOptions compile_options;
TF_ASSIGN_OR_RETURN(*compile_request->mutable_compile_options(),
Serialize(compile_options));
EXPECT_CALL(mock_compiler_, Compile(_, _))
.WillOnce(Return(ByMove(std::move(loaded_executable))));
TF_ASSIGN_OR_RETURN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
TF_RET_CHECK(response->has_compile_response());
return response->compile_response();
}
absl::Status CheckFuture(uint64_t handle) {
auto request = NewIfrtRequest(NewOpId());
request->mutable_check_future_request()->set_future_handle(handle);
TF_ASSIGN_OR_RETURN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
return tsl::StatusFromProto(response->response_metadata().status());
}
xla::ifrt::MockClient* mock_client_;
xla::ifrt::MockCompiler mock_compiler_;
std::vector<std::unique_ptr<xla::ifrt::MockDevice>> mock_devices_;
std::shared_ptr<HostBufferStore> host_buffer_store_;
private:
absl::Mutex mu_;
uint64_t current_op_id_ ABSL_GUARDED_BY(mu_) = 1;
uint64_t current_host_buffer_handle_ = 1;
std::unique_ptr<IfrtBackend> backend_;
};
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, Init) {
EXPECT_CALL(*mock_client_, platform_name())
.WillRepeatedly(Return("ifrt_backend"));
EXPECT_CALL(*mock_client_, platform_version()).WillRepeatedly(Return("n/a"));
EXPECT_CALL(*mock_client_, platform_id()).WillRepeatedly(Return(42));
EXPECT_CALL(*mock_client_, process_index()).WillRepeatedly(Return(1));
EXPECT_CALL(*mock_client_, runtime_type())
.WillRepeatedly(Return("ifrt-service"));
std::vector<std::vector<xla::ifrt::Device*>> mock_memory_devices;
mock_memory_devices.reserve(mock_devices_.size());
for (const auto& mock_device : mock_devices_) {
mock_memory_devices.push_back({mock_device.get()});
}
std::vector<MockMemory> mock_memories(mock_devices_.size());
MemoryKind kind("mock");
for (int i = 0; i < mock_memories.size(); ++i) {
MockMemory& memory = mock_memories[i];
EXPECT_CALL(memory, Devices())
.WillRepeatedly(Return(mock_memory_devices[i]));
EXPECT_CALL(memory, Id()).WillRepeatedly(Return(MemoryId(i)));
EXPECT_CALL(memory, Kind()).WillRepeatedly(ReturnRef(kind));
}
std::vector<std::vector<Memory*>> device_memories;
device_memories.reserve(mock_devices_.size());
for (int i = 0; i < mock_devices_.size(); ++i) {
device_memories.push_back({&mock_memories[i]});
}
using AttributeMap =
absl::flat_hash_map<std::string, xla::PjRtDeviceAttribute>;
std::vector<AttributeMap> device_attributes(mock_devices_.size());
for (int i = 0; i < mock_devices_.size(); ++i) {
device_attributes[i].insert({"name", absl::StrCat("device", i)});
MockDevice& mock_device = *mock_devices_[i];
EXPECT_CALL(mock_device, Kind()).WillRepeatedly(Return("mock"));
EXPECT_CALL(mock_device, Memories())
.WillRepeatedly(Return(device_memories[i]));
EXPECT_CALL(mock_device, DefaultMemory())
.WillRepeatedly(Return(&mock_memories[i]));
EXPECT_CALL(mock_device, Attributes())
.WillRepeatedly(ReturnRef(device_attributes[i]));
}
auto request = NewIfrtRequest(NewOpId());
request->mutable_init_request();
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(
Partially(IgnoringRepeatedFieldOrdering(EquivToProto(R"pb(
init_response {
session_id: 12345
platform_name: "ifrt_backend"
platform_version: "n/a"
platform_id: 42
process_index: 1
runtime_type: "ifrt-service"
devices {
id: 0
device_kind: "mock"
default_memory_id: 0
memory_ids: [ 0 ]
attributes {
key: "name"
value { string_value: "device0" }
}
}
devices {
id: 1
device_kind: "mock"
default_memory_id: 1
memory_ids: [ 1 ]
attributes {
key: "name"
value { string_value: "device1" }
}
}
memories {
id: 0
memory_space_kind: "mock"
device_ids: [ 0 ]
}
memories {
id: 1
memory_space_kind: "mock"
device_ids: [ 1 ]
}
}
)pb"))))));
}
#endif
TEST_P(IfrtBackendHandlerTest, DisassembleIntoSingleDeviceArraysSucceeds) {
std::vector<tsl::RCReference<xla::ifrt::Array>> single_device_arrays;
single_device_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
single_device_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
tsl::RCReference<xla::ifrt::MockArray> source_mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*source_mock_array, DisassembleIntoSingleDeviceArrays(_))
.WillOnce(Return(std::move(single_device_arrays)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(source_mock_array)));
auto disassemble_request = NewIfrtRequest(NewOpId());
disassemble_request->mutable_disassemble_into_single_device_arrays_request()
->set_array_handle(array_handle);
TF_ASSERT_OK_AND_ASSIGN(auto disassemble_response,
CallBackend(std::move(disassemble_request)));
EXPECT_THAT(
disassemble_response->disassemble_into_single_device_arrays_response()
.single_device_array_handles(),
SizeIs(2));
}
TEST_P(IfrtBackendHandlerTest, MakeArrayFromHostBufferSuccess) {
const uint64_t kHostBufferHandle = 1234;
ASSERT_THAT(
host_buffer_store_->Store(kHostBufferHandle, std::string(480, 'a')),
IsOk());
auto ifrt_request = NewIfrtRequest(NewOpId());
{
auto* make_array =
ifrt_request->mutable_make_array_from_host_buffer_request();
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
dtype { kind: KIND_F64 }
shape { dims: [ 5, 3, 4 ] }
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
make_array));
make_array->set_host_buffer_handle(kHostBufferHandle);
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*make_array->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
const Shape expected_shape({5, 3, 4});
const std::vector<int64_t> expected_byte_strides_vec = {8, 40, 120};
const std::optional<absl::Span<const int64_t>> expected_byte_strides =
absl::Span<const int64_t>(expected_byte_strides_vec);
tsl::RCReference<xla::ifrt::MockArray> mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_client_,
MakeArrayFromHostBuffer(_, DType(DType::kF64), expected_shape,
expected_byte_strides, _, _, _))
.WillOnce(Return(std::move(mock_array)));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->make_array_from_host_buffer_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, AssembleArrayFromSingleDeviceArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
{
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
shape { dims: [ 2, 2 ] }
copy_semantics: ARRAY_COPY_SEMANTICS_ALWAYS_COPY
)pb",
ifrt_request
->mutable_assemble_array_from_single_device_arrays_request()));
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request
->mutable_assemble_array_from_single_device_arrays_request()
->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
std::vector<tsl::RCReference<xla::ifrt::MockArray>> single_device_arrays;
for (int i = 0; i < 2; ++i) {
auto array = tsl::MakeRef<xla::ifrt::MockArray>();
single_device_arrays.push_back(array);
TF_ASSERT_OK_AND_ASSIGN(uint64_t array_handle, MakeTestArray(array));
ifrt_request->mutable_assemble_array_from_single_device_arrays_request()
->add_single_device_array_handles(array_handle);
}
tsl::RCReference<xla::ifrt::MockArray> result =
tsl::MakeRef<xla::ifrt::MockArray>();
const Shape expected_shape({2, 2});
EXPECT_CALL(*mock_client_,
AssembleArrayFromSingleDeviceArrays(
expected_shape, _, ElementsAreArray(single_device_arrays), _))
.WillOnce(Return(std::move(result)));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->assemble_array_from_single_device_arrays_response()
.array_handle(),
0);
}
TEST_P(IfrtBackendHandlerTest, CopyToHostSuccess) {
Shape shape({5, 3, 4});
tsl::RCReference<xla::ifrt::MockArray> array =
tsl::MakeRef<xla::ifrt::MockArray>();
ON_CALL(*array, shape()).WillByDefault(ReturnRef(shape));
ON_CALL(*array, dtype()).WillByDefault(Return(DType(DType::kF64)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle, MakeTestArray(array));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* copy_to_host = ifrt_request->mutable_copy_to_host_buffer_request();
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
copy_to_host));
copy_to_host->set_array_handle(array_handle);
const uint64_t host_buffer_handle = NewHostBufferHandle();
copy_to_host->set_host_buffer_handle(host_buffer_handle);
const std::vector<int64_t> expected_byte_strides_vec = {8, 40, 120};
const std::optional<absl::Span<const int64_t>> expected_byte_strides =
absl::Span<const int64_t>(expected_byte_strides_vec);
EXPECT_CALL(*array, CopyToHostBuffer(_, expected_byte_strides, _))
.WillOnce(Return(Future<>(absl::OkStatus())));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(host_buffer_store_->Lookup(host_buffer_handle),
IsOkAndHolds(Pointee(SizeIs(480))));
}
TEST_P(IfrtBackendHandlerTest, CopyToHostFailsWithNonExistentArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
ifrt_request->mutable_copy_to_host_buffer_request()));
ifrt_request->mutable_copy_to_host_buffer_request()->set_array_handle(0);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
DisassembleIntoSingleArrayFailsWhenBackendRuntimeFails) {
constexpr absl::string_view kDisassembleErrorMessage =
"Some test-injected error message that is unlikely to match other error "
"messages - 1234";
tsl::RCReference<xla::ifrt::MockArray> source_mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*source_mock_array, DisassembleIntoSingleDeviceArrays(_))
.WillOnce(Return(absl::UnknownError(kDisassembleErrorMessage)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(source_mock_array)));
auto disassemble_request = NewIfrtRequest(NewOpId());
disassemble_request->mutable_disassemble_into_single_device_arrays_request()
->set_array_handle(array_handle);
ASSERT_THAT(
CallBackend(std::move(disassemble_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq(kDisassembleErrorMessage)));
}
TEST_P(IfrtBackendHandlerTest, CopyArrays) {
std::vector<tsl::RCReference<xla::ifrt::Array>> src_arrays;
src_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
std::vector<tsl::RCReference<xla::ifrt::Array>> copied_arrays;
copied_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
DeviceList::Devices ds;
TF_ASSERT_OK_AND_ASSIGN(ds.emplace_back(),
mock_client_->LookupDevice(DeviceId(1)));
DeviceList devices(std::move(ds));
MemoryKind memory_kind("device");
EXPECT_CALL(
*mock_client_,
CopyArrays(ElementsAreArray(src_arrays), Optional(devices),
Optional(memory_kind), ArrayCopySemantics::kAlwaysCopy))
.WillOnce(Return(
std::vector<tsl::RCReference<xla::ifrt::Array>>(copied_arrays)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* copy_arrays_request = ifrt_request->mutable_copy_arrays_request();
for (const auto& src_array : src_arrays) {
TF_ASSERT_OK_AND_ASSIGN(auto src_array_handle, MakeTestArray(src_array));
copy_arrays_request->add_array_handles(src_array_handle);
}
for (const auto& device : devices.devices()) {
copy_arrays_request->add_device_ids(device->Id().value());
}
copy_arrays_request->set_memory_kind(std::string(*memory_kind.memory_kind()));
copy_arrays_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(response->response_metadata().status()),
IsOk());
EXPECT_THAT(response->copy_arrays_response().array_handles(),
SizeIs(copied_arrays.size()));
}
TEST_P(IfrtBackendHandlerTest, ReshardSuccess) {
auto src_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(0)));
auto src_sharding = SingleDeviceSharding::Create(device, MemoryKind());
ON_CALL(*src_mock_array, sharding()).WillByDefault(ReturnRef(*src_sharding));
TF_ASSERT_OK_AND_ASSIGN(auto src_array_handle,
MakeTestArray(std::move(src_mock_array)));
auto copied_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_client_, CopyArrays(_, _, _, _))
.WillOnce(Return(std::vector<tsl::RCReference<xla::ifrt::Array>>(
{copied_mock_array})));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(src_array_handle);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto* new_device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request->mutable_reshard_request()->mutable_sharding(),
SingleDeviceSharding::Create(new_device, MemoryKind())->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(response->response_metadata().status()),
IsOk());
EXPECT_NE(response->reshard_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, ReshardFailsWhenTheBackendFails) {
auto mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
auto sharding = SingleDeviceSharding::Create(device, MemoryKind());
ON_CALL(*mock_array, sharding()).WillByDefault(ReturnRef(*sharding));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
EXPECT_CALL(*mock_client_, CopyArrays(_, _, _, _))
.WillOnce(Return(absl::UnknownError("injected error")));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(array_handle);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto* new_device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request->mutable_reshard_request()->mutable_sharding(),
SingleDeviceSharding::Create(new_device, MemoryKind())->ToProto());
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
TEST_P(IfrtBackendHandlerTest, ReshardFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(0);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
reshard_request->mutable_sharding();
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest, FullyReplicatedShardSuccess) {
auto fully_replicated_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
auto resultant_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*fully_replicated_mock_array, FullyReplicatedShard(_))
.WillOnce(Return(std::move(resultant_array)));
TF_ASSERT_OK_AND_ASSIGN(
auto fully_replicated_array_handle,
MakeTestArray(std::move(fully_replicated_mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(
fully_replicated_array_handle);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->fully_replicated_shard_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, FullyReplicatedShardFailure) {
auto fully_replicated_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*fully_replicated_mock_array, FullyReplicatedShard(_))
.WillOnce(Return(absl::UnknownError("injected error")));
TF_ASSERT_OK_AND_ASSIGN(
auto fully_replicated_array_handle,
MakeTestArray(std::move(fully_replicated_mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(
fully_replicated_array_handle);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
TEST_P(IfrtBackendHandlerTest,
FullyReplicatedShardFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(0);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
CheckArrayReadyRequestRelaysTheResultFromBackend) {
auto mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
EXPECT_CALL(*mock_client_, GetReadyFuture(_))
.WillOnce(Return(Future<>(absl::OkStatus())))
.WillOnce(Return(Future<>(absl::UnknownError("injected error"))));
{
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_ |
1,810 | cpp | tensorflow/tensorflow | grpc_service_impl | third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl.cc | third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_SERVER_GRPC_SERVICE_IMPL_H_
#define XLA_PYTHON_IFRT_PROXY_SERVER_GRPC_SERVICE_IMPL_H_
#include <atomic>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/die_if_null.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/ifrt_backend.h"
namespace xla {
namespace ifrt {
namespace proxy {
class GrpcServiceImpl : public grpc::GrpcIfrtService::Service {
public:
using BackendFactory =
absl::AnyInvocable<absl::StatusOr<std::unique_ptr<BackendInterface>>(
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::proxy::HostBufferStore>
host_buffer_store)>;
explicit GrpcServiceImpl(BackendFactory backend_factory)
: backend_factory_(ABSL_DIE_IF_NULL(std::move(backend_factory))) {}
::grpc::Status GetVersion(::grpc::ServerContext* context,
const GrpcGetVersionRequest* request,
GrpcGetVersionResponse* response) override;
::grpc::Status IfrtSession(
::grpc::ServerContext* context,
::grpc::ServerReaderWriter<IfrtResponse, IfrtRequest>* stream) override;
::grpc::Status HostBufferStore(
::grpc::ServerContext* context,
::grpc::ServerReader<GrpcHostBufferStoreRequest>* stream,
GrpcHostBufferStoreResponse* response) override;
::grpc::Status HostBufferLookup(
::grpc::ServerContext* context,
const GrpcHostBufferLookupRequest* request,
::grpc::ServerWriter<GrpcHostBufferLookupResponse>* stream) override;
::grpc::Status HostBufferDelete(
::grpc::ServerContext* context,
const GrpcHostBufferDeleteRequest* request,
GrpcHostBufferDeleteResponse* response) override;
bool Test_InsertHostBufferStore(
uint64_t session_id,
std::shared_ptr<xla::ifrt::proxy::HostBufferStore> store);
bool Test_DeleteHostBufferStore(uint64_t session_id);
private:
absl::StatusOr<std::shared_ptr<xla::ifrt::proxy::HostBufferStore>>
GetHostBufferStore(uint64_t session_id)
ABSL_LOCKS_EXCLUDED(host_buffer_store_mu_);
BackendFactory backend_factory_;
std::atomic<uint64_t> next_session_id_ = 1;
absl::Mutex host_buffer_store_mu_;
absl::flat_hash_map<uint64_t,
std::shared_ptr<xla::ifrt::proxy::HostBufferStore>>
host_buffer_stores_ ABSL_GUARDED_BY(host_buffer_store_mu_);
};
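// Example usage (an illustrative sketch mirroring the in-process setup used in
// grpc_service_impl_test.cc; the backend factory shown here always fails, so
// only the host-buffer RPCs would be serviceable):
//
//   GrpcServiceImpl service(
//       [](IfrtProxyVersion version, uint64_t session_id,
//          std::shared_ptr<xla::ifrt::proxy::HostBufferStore> host_buffer_store)
//           -> absl::StatusOr<std::unique_ptr<BackendInterface>> {
//         return absl::UnimplementedError("No IFRT backend in this example");
//       });
//   ::grpc::ServerBuilder builder;
//   builder.RegisterService(&service);
//   std::unique_ptr<::grpc::Server> server = builder.BuildAndStart();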
}  // namespace proxy
}  // namespace ifrt
}  // namespace xla
#endif  // XLA_PYTHON_IFRT_PROXY_SERVER_GRPC_SERVICE_IMPL_H_
#include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/pjrt/distributed/util.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/proto_util.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/version.h"
namespace xla {
namespace ifrt {
namespace proxy {
::grpc::Status GrpcServiceImpl::GetVersion(::grpc::ServerContext* context,
const GrpcGetVersionRequest* request,
GrpcGetVersionResponse* response) {
auto protocol_version =
ChooseVersion(request->min_version().protocol_version(),
request->max_version().protocol_version());
if (!protocol_version.ok()) {
return xla::ToGrpcStatus(protocol_version.status());
}
response->mutable_version()->set_protocol_version(*protocol_version);
return ::grpc::Status::OK;
}
::grpc::Status GrpcServiceImpl::IfrtSession(
::grpc::ServerContext* context,
::grpc::ServerReaderWriter<IfrtResponse, IfrtRequest>* stream) {
GrpcIfrtSessionMetadata metadata;
{
const auto it = context->client_metadata().find(
"ifrt-proxy-grpc-ifrt-session-metadata-bin");
if (it == context->client_metadata().end()) {
return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT,
"Missing metadata for GrpcIfrtService.IfrtSession: "
"ifrt-proxy-grpc-ifrt-session-metadata-bin");
}
if (!metadata.ParseFromString(AsProtoStringData(
absl::string_view(it->second.data(), it->second.size())))) {
return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT,
"Unable to parse GrpcIfrtSessionMetadata");
}
}
const uint64_t session_id =
next_session_id_.fetch_add(1, std::memory_order_relaxed);
VLOG(0) << "Starting a new IFRT session with session_id=" << session_id;
auto host_buffer_store =
std::make_shared<xla::ifrt::proxy::HostBufferStore>();
{
absl::MutexLock l(&host_buffer_store_mu_);
CHECK(host_buffer_stores_.insert({session_id, host_buffer_store}).second);
}
absl::Cleanup cleanup = [&] {
absl::MutexLock l(&host_buffer_store_mu_);
CHECK_GT(host_buffer_stores_.erase(session_id), 0);
};
auto backend = backend_factory_(metadata.version(), session_id,
std::move(host_buffer_store));
if (!backend.ok()) {
LOG(INFO) << "Creating IFRT backend " << session_id
<< " failed: " << backend.status();
return xla::ToGrpcStatus(backend.status());
}
absl::Mutex writer_mu;
bool first_request_read = false;
while (true) {
auto request = std::make_unique<IfrtRequest>();
if (!stream->Read(request.get())) {
break;
}
if (!first_request_read) {
VLOG(0) << "First request read for session " << session_id;
first_request_read = true;
}
const uint64_t op_id = request->request_metadata().op_id();
auto response = (*backend)->Process(std::move(request));
response.OnReady(
[op_id, stream,
&writer_mu](absl::StatusOr<std::shared_ptr<IfrtResponse>> response) {
absl::MutexLock l(&writer_mu);
if (response.ok()) {
stream->Write(**response);
} else {
stream->Write(*NewIfrtResponse(op_id, response.status()));
}
});
}
backend->reset();
VLOG(0) << "Finishing IFRT session " << session_id;
return ::grpc::Status::OK;
}
::grpc::Status GrpcServiceImpl::HostBufferStore(
::grpc::ServerContext* context,
::grpc::ServerReader<GrpcHostBufferStoreRequest>* stream,
GrpcHostBufferStoreResponse* response) {
const auto it = context->client_metadata().find(
"ifrt-proxy-grpc-host-buffer-store-metadata-bin");
if (it == context->client_metadata().end()) {
return ::grpc::Status(
::grpc::StatusCode::INTERNAL,
"Missing gRPC metadata for GrpcHostBufferService.Store");
}
GrpcHostBufferStoreMetadata metadata;
if (!metadata.ParseFromString(AsProtoStringData(
absl::string_view(it->second.data(), it->second.size())))) {
return ::grpc::Status(::grpc::StatusCode::DATA_LOSS,
"Unable to parse GrpcHostBufferStoreMetadata");
}
std::string data;
data.reserve(metadata.buffer_size());
GrpcHostBufferStoreRequest request;
while (stream->Read(&request)) {
data.append(request.data());
}
if (data.size() != metadata.buffer_size()) {
return ::grpc::Status(
::grpc::StatusCode::DATA_LOSS,
absl::StrCat("Potential data loss for host buffers: expected ",
metadata.buffer_size(), " bytes but got ", data.size(),
" bytes"));
}
auto store = GetHostBufferStore(metadata.session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
return xla::ToGrpcStatus((*store)->Store(metadata.handle(), std::move(data)));
}
::grpc::Status GrpcServiceImpl::HostBufferLookup(
::grpc::ServerContext* context, const GrpcHostBufferLookupRequest* request,
::grpc::ServerWriter<GrpcHostBufferLookupResponse>* stream) {
static constexpr int64_t kChunkSize = 1024 * 1024;
auto store = GetHostBufferStore(request->session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
auto data = (*store)->Lookup(request->handle());
if (!data.ok()) {
return xla::ToGrpcStatus(data.status());
}
GrpcHostBufferLookupResponse response;
if (!(*data)->empty()) {
for (int64_t offset = 0; offset < (*data)->size(); offset += kChunkSize) {
#if defined(PLATFORM_GOOGLE)
response.set_alias_data(
absl::string_view(**data).substr(offset, kChunkSize));
#else
response.set_data((*data)->substr(offset, kChunkSize));
#endif
stream->Write(response);
response.Clear();
}
} else {
stream->Write(response);
}
return ::grpc::Status::OK;
}
::grpc::Status GrpcServiceImpl::HostBufferDelete(
::grpc::ServerContext* context, const GrpcHostBufferDeleteRequest* request,
GrpcHostBufferDeleteResponse* response) {
auto store = GetHostBufferStore(request->session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
return xla::ToGrpcStatus((*store)->Delete(request->handle()));
}
bool GrpcServiceImpl::Test_InsertHostBufferStore(
uint64_t session_id,
std::shared_ptr<xla::ifrt::proxy::HostBufferStore> store) {
absl::MutexLock l(&host_buffer_store_mu_);
return host_buffer_stores_.insert({session_id, std::move(store)}).second;
}
bool GrpcServiceImpl::Test_DeleteHostBufferStore(uint64_t session_id) {
absl::MutexLock l(&host_buffer_store_mu_);
return host_buffer_stores_.erase(session_id) > 0;
}
absl::StatusOr<std::shared_ptr<xla::ifrt::proxy::HostBufferStore>>
GrpcServiceImpl::GetHostBufferStore(uint64_t session_id) {
absl::MutexLock l(&host_buffer_store_mu_);
const auto it = host_buffer_stores_.find(session_id);
if (it == host_buffer_stores_.end()) {
return absl::NotFoundError(
absl::StrCat("Session id ", session_id, " does not exist"));
}
return it->second;
}
}
}
} | #include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "xla/python/ifrt_proxy/client/grpc_host_buffer.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/server/grpc_server.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
IfrtProxyVersion Version() {
IfrtProxyVersion version;
version.set_protocol_version(kServerMaxVersion);
return version;
}
absl::StatusOr<std::unique_ptr<GrpcServer>> MakeGrpcServer() {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
return GrpcServer::CreateFromIfrtClientFactory(addr, []() {
return absl::UnimplementedError(
"IFRT client creation fails. This test is not expected to "
"instantiate any IFRT client");
});
}
TEST(GrpcServiceImplTest, CanBeUsedToSetupAnGrpcServer) {
ASSERT_THAT(MakeGrpcServer(), IsOk());
}
class GrpcIfrtServiceImplHostBufferTest
: public testing::TestWithParam<int64_t> {
protected:
GrpcIfrtServiceImplHostBufferTest()
: impl_([](IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<HostBufferStore> host_buffer_store) {
return absl::UnimplementedError(
"IFRT backend creation is not implemented");
}) {
::grpc::ServerBuilder builder;
builder.RegisterService(&impl_);
server_ = builder.BuildAndStart();
stub_ = grpc::GrpcIfrtService::NewStub(
server_->InProcessChannel(::grpc::ChannelArguments()));
}
std::string GetTestData() const {
std::string data;
for (int i = 0; i < GetParam(); ++i) {
data.push_back(i % 7);
}
return data;
}
GrpcServiceImpl impl_;
std::unique_ptr<::grpc::Server> server_;
std::shared_ptr<grpc::GrpcIfrtService::Stub> stub_;
};
TEST_P(GrpcIfrtServiceImplHostBufferTest, StoreAndLookupStringView) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
absl::string_view source(data);
ASSERT_THAT(client.Store(kHandle, source).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, StoreAndLookupCord) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
absl::Cord source(data);
ASSERT_THAT(client.Store(kHandle, source).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, Lookup) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
ASSERT_THAT(store->Store(kHandle, data), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, Delete) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
ASSERT_THAT(store->Store(kHandle, data), IsOk());
ASSERT_THAT(client.Delete(kHandle).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
INSTANTIATE_TEST_SUITE_P(
DataSize, GrpcIfrtServiceImplHostBufferTest,
    testing::Values(0,                  // empty buffer
                    16,                 // small buffer, fits in a single chunk
                    3 * 1024 * 1024));  // large buffer, spans multiple chunks
}
}
}
} |
1,811 | cpp | tensorflow/tensorflow | host_buffer | third_party/xla/xla/python/ifrt_proxy/server/host_buffer.cc | third_party/xla/xla/python/ifrt_proxy/server/host_buffer_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_SERVER_HOST_BUFFER_H_
#define XLA_PYTHON_IFRT_PROXY_SERVER_HOST_BUFFER_H_
#include <cstdint>
#include <memory>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
namespace xla {
namespace ifrt {
namespace proxy {
class HostBufferStore {
public:
absl::Status Store(uint64_t handle, std::string data);
absl::StatusOr<std::shared_ptr<const std::string>> Lookup(uint64_t handle);
absl::Status Delete(uint64_t handle);
private:
absl::Mutex mu_;
absl::flat_hash_map<uint64_t, std::shared_ptr<const std::string>> buffers_
ABSL_GUARDED_BY(mu_);
};
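// Example usage (an illustrative sketch; the handle value is arbitrary and is
// chosen by the caller):
//
//   HostBufferStore store;
//   TF_RETURN_IF_ERROR(store.Store(/*handle=*/1, "serialized bytes"));
//   TF_ASSIGN_OR_RETURN(std::shared_ptr<const std::string> data,
//                       store.Lookup(/*handle=*/1));
//   TF_RETURN_IF_ERROR(store.Delete(/*handle=*/1));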
}  // namespace proxy
}  // namespace ifrt
}  // namespace xla
#endif  // XLA_PYTHON_IFRT_PROXY_SERVER_HOST_BUFFER_H_
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
namespace xla {
namespace ifrt {
namespace proxy {
absl::Status HostBufferStore::Store(uint64_t handle, std::string data) {
absl::MutexLock lock(&mu_);
const bool inserted =
buffers_.insert({handle, std::make_shared<std::string>(std::move(data))})
.second;
if (!inserted) {
return absl::AlreadyExistsError(
absl::StrCat("Host buffer handle ", handle, " already exists"));
}
return absl::OkStatus();
}
absl::StatusOr<std::shared_ptr<const std::string>> HostBufferStore::Lookup(
uint64_t handle) {
absl::MutexLock lock(&mu_);
const auto it = buffers_.find(handle);
if (it == buffers_.end()) {
return absl::NotFoundError(
absl::StrCat("Host buffer handle ", handle, " not found"));
}
return it->second;
}
absl::Status HostBufferStore::Delete(uint64_t handle) {
absl::MutexLock lock(&mu_);
if (buffers_.erase(handle) == 0) {
return absl::NotFoundError(
absl::StrCat("Host buffer handle ", handle, " not found"));
}
return absl::OkStatus();
}
}
}
} | #include "xla/python/ifrt_proxy/server/host_buffer.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Pointee;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
TEST(HostBufferStoreTest, ReadAfterWrite) {
HostBufferStore store;
const uint64_t kHandle = 1;
ASSERT_THAT(store.Store(kHandle, "foo"), IsOk());
EXPECT_THAT(store.Lookup(kHandle), IsOkAndHolds(Pointee(std::string("foo"))));
ASSERT_THAT(store.Delete(kHandle), IsOk());
EXPECT_THAT(store.Lookup(kHandle), StatusIs(absl::StatusCode::kNotFound));
}
TEST(HostBufferStoreTest, UnknownHandle) {
HostBufferStore store;
const uint64_t kHandle = 1;
EXPECT_THAT(store.Lookup(kHandle), StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(store.Delete(kHandle), StatusIs(absl::StatusCode::kNotFound));
}
}
}
}
} |
1,812 | cpp | tensorflow/tensorflow | host_callback | third_party/xla/xla/pjrt/host_callback.cc | third_party/xla/xla/pjrt/host_callback_test.cc | #ifndef XLA_PJRT_HOST_CALLBACK_H_
#define XLA_PJRT_HOST_CALLBACK_H_
#include <atomic>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/shape.h"
#include "tsl/platform/logging.h"
namespace xla {
bool ThisThreadIsInsideHostCallback();
void EnterHostCallback();
void LeaveHostCallback();
class ThreadSafePjRtChunkQueue {
public:
void Push(PjRtChunk chunk) {
absl::MutexLock lock(&mu_);
if (promises_.empty()) {
queue_.push_back(std::move(chunk));
return;
}
auto pop_promise = promises_.front();
pop_promise.Set(std::move(chunk));
promises_.pop_front();
}
PjRtFuture<PjRtChunk> Pop() {
absl::MutexLock lock(&mu_);
if (queue_.empty()) {
auto promise = PjRtFuture<PjRtChunk>::CreatePromise();
promises_.push_back(promise);
return PjRtFuture<PjRtChunk>(std::move(promise));
}
auto chunk = PjRtFuture<PjRtChunk>(std::move(queue_.front()));
queue_.pop_front();
return chunk;
}
private:
absl::Mutex mu_;
std::deque<PjRtChunk> queue_ ABSL_GUARDED_BY(mu_);
std::deque<PjRtFuture<PjRtChunk>::Promise> promises_ ABSL_GUARDED_BY(mu_);
};
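// Example usage (an illustrative sketch): Pop() returns a future, so a
// consumer may register its callback before any producer has pushed a chunk;
// the callback fires as soon as the matching Push() happens.
//
//   ThreadSafePjRtChunkQueue queue;
//   queue.Pop().OnReady([](absl::StatusOr<PjRtChunk> chunk) {
//     // `chunk` holds the pushed data here; consume *chunk.
//   });
//   queue.Push(PjRtChunk::AllocateDefault(16));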
struct HostCallbackArgInfo {
uint16_t channel_id;
Shape shape;
};
struct HostCallback {
std::vector<HostCallbackArgInfo> operands;
std::vector<HostCallbackArgInfo> results;
std::function<absl::Status(void**, void**)> callback;
};
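// Example (an illustrative sketch adapted from host_callback_test.cc): a host
// callback that copies its single f32[2,2] operand into its single result.
// The channel ids (1 and 2) are arbitrary in this sketch; in practice they
// correspond to the send/recv channel ids used by the program.
//
//   HostCallback host_callback;
//   Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
//   size_t byte_size = ShapeUtil::ByteSizeOf(shape);
//   host_callback.operands = {HostCallbackArgInfo{/*channel_id=*/1, shape}};
//   host_callback.results = {HostCallbackArgInfo{/*channel_id=*/2, shape}};
//   host_callback.callback = [byte_size](void** outputs, void** inputs) {
//     std::memcpy(outputs[0], inputs[0], byte_size);
//     return absl::OkStatus();
//   };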
class HostCallbackContext {
public:
HostCallbackContext(
HostCallback host_callback,
bool use_major_to_minor_data_layout_for_callbacks,
PjRtHostMemoryForDeviceManager* host_memory_for_device_manager)
: host_callback_(std::move(host_callback)),
use_major_to_minor_data_layout_for_callbacks_(
use_major_to_minor_data_layout_for_callbacks),
host_memory_for_device_manager_(host_memory_for_device_manager),
args_(host_callback_.operands.size()),
result_channels_(host_callback_.results.size()),
ready_count_(args_.size()) {
if (!use_major_to_minor_data_layout_for_callbacks_) {
CHECK(host_memory_for_device_manager_);
}
for (auto& channel : result_channels_) {
channel = std::make_unique<ThreadSafePjRtChunkQueue>();
}
}
absl::Status OnSend(int arg_num, const PjRtTransferMetadata& metadata,
PjRtChunk data);
void Receive(int res_num, const PjRtTransferMetadata& metadata,
std::unique_ptr<CopyToDeviceStream> stream);
const HostCallback& host_callback() const { return host_callback_; }
private:
HostCallback host_callback_;
bool use_major_to_minor_data_layout_for_callbacks_;
PjRtHostMemoryForDeviceManager* host_memory_for_device_manager_ = nullptr;
std::vector<PjRtChunk> args_;
std::vector<std::unique_ptr<ThreadSafePjRtChunkQueue>> result_channels_;
std::atomic<int> ready_count_;
};
struct HostCallbackStates {
std::vector<std::vector<std::unique_ptr<HostCallbackContext>>> contexts;
std::vector<std::vector<SendCallback>> send_callbacks;
std::vector<std::vector<RecvCallback>> recv_callbacks;
};
std::unique_ptr<HostCallbackContext>
CreateHostCallbackStateAndAppendSendRecvCallbacks(
HostCallback host_callback,
PjRtHostMemoryForDeviceManager* host_memory_for_device_manager,
std::vector<SendCallback>& send_callbacks,
std::vector<RecvCallback>& recv_callbacks,
bool use_major_to_minor_data_layout_for_callbacks);
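// Example wiring (an illustrative sketch; passing a null
// PjRtHostMemoryForDeviceManager is only valid when
// use_major_to_minor_data_layout_for_callbacks is true, per the CHECK in
// HostCallbackContext's constructor):
//
//   std::vector<SendCallback> send_callbacks;
//   std::vector<RecvCallback> recv_callbacks;
//   auto context = CreateHostCallbackStateAndAppendSendRecvCallbacks(
//       std::move(host_callback), /*host_memory_for_device_manager=*/nullptr,
//       send_callbacks, recv_callbacks,
//       /*use_major_to_minor_data_layout_for_callbacks=*/true);
//   // `send_callbacks`/`recv_callbacks` are then passed to the executable's
//   // execute options, and `context` must outlive the execution.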
}  // namespace xla
#endif  // XLA_PJRT_HOST_CALLBACK_H_
#include "xla/pjrt/host_callback.h"
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
static thread_local int on_send_guard = 0;
void EnterHostCallback() { ++on_send_guard; }
void LeaveHostCallback() { --on_send_guard; }
bool ThisThreadIsInsideHostCallback() { return on_send_guard > 0; }
absl::Status HostCallbackContext::OnSend(int arg_num,
const PjRtTransferMetadata& metadata,
PjRtChunk data) {
if (!use_major_to_minor_data_layout_for_callbacks_) {
const auto& arg_info = host_callback_.operands.at(arg_num);
const auto& host_shape = arg_info.shape;
const auto& device_shape = metadata.device_shape;
size_t host_size = ShapeUtil::ByteSizeOf(host_shape);
DCHECK_GE(data.size(), host_size);
auto delinearized = PjRtChunk::AllocateDefault(host_size);
TF_CHECK_OK(host_memory_for_device_manager_->ToHostLayout(
data.data(), data.size(), device_shape, delinearized.data(),
delinearized.size(), host_shape));
data = std::move(delinearized);
}
args_.at(arg_num) = std::move(data);
DCHECK_GE(ready_count_.load(), 1);
if (ready_count_.fetch_sub(1) != 1) {
return absl::OkStatus();
}
ready_count_.store(args_.size());
std::vector<void*> arg_ptrs;
arg_ptrs.reserve(args_.size());
for (auto& arg : args_) {
arg_ptrs.push_back(arg.data());
}
std::vector<PjRtChunk> results;
std::vector<void*> result_ptrs;
results.reserve(result_channels_.size());
result_ptrs.reserve(result_channels_.size());
for (int i = 0; i < result_channels_.size(); ++i) {
const auto& host_shape = host_callback_.results.at(i).shape;
size_t host_size = ShapeUtil::ByteSizeOf(host_shape);
results.push_back(PjRtChunk::AllocateDefault(host_size));
result_ptrs.push_back(results.back().data());
}
EnterHostCallback();
auto status = host_callback_.callback(result_ptrs.data(), arg_ptrs.data());
LeaveHostCallback();
for (auto& arg : args_) {
arg = PjRtChunk{};
}
for (int i = 0; i < result_channels_.size(); ++i) {
auto& result_channel = result_channels_[i];
result_channel->Push(std::move(results[i]));
}
return status;
}
void HostCallbackContext::Receive(int res_num,
const PjRtTransferMetadata& metadata,
std::unique_ptr<CopyToDeviceStream> stream) {
auto& result_channel = result_channels_.at(res_num);
result_channel->Pop().OnReady(
[this, res_num, metadata,
stream = std::move(stream)](absl::StatusOr<PjRtChunk> chunk) mutable {
TF_CHECK_OK(chunk.status());
if (!use_major_to_minor_data_layout_for_callbacks_) {
const auto& host_shape = host_callback_.results.at(res_num).shape;
const auto& device_shape = metadata.device_shape;
auto statusor_linearized =
host_memory_for_device_manager_->ToDeviceLayout(
chunk->data(), chunk->size(), host_shape, device_shape);
chunk = std::move(statusor_linearized.value());
}
stream->AddChunk(*std::move(chunk)).OnReady([](absl::Status s) {
TF_CHECK_OK(s);
});
});
}
std::unique_ptr<HostCallbackContext>
CreateHostCallbackStateAndAppendSendRecvCallbacks(
HostCallback host_callback,
PjRtHostMemoryForDeviceManager* host_memory_for_device_manager,
std::vector<SendCallback>& send_callbacks,
std::vector<RecvCallback>& recv_callbacks,
bool use_major_to_minor_data_layout_for_callbacks) {
auto context = std::make_unique<HostCallbackContext>(
std::move(host_callback), use_major_to_minor_data_layout_for_callbacks,
host_memory_for_device_manager);
const auto& hb = context->host_callback();
for (int arg_num = 0; arg_num < hb.operands.size(); ++arg_num) {
const auto& operand_info = hb.operands[arg_num];
send_callbacks.push_back(SendCallback{
operand_info.channel_id,
[arg_num, context = context.get()](
const PjRtTransferMetadata& metadata, PjRtChunk input,
size_t total_size_in_bytes, bool done) {
return context->OnSend(arg_num, metadata, std::move(input));
}});
}
for (int res_num = 0; res_num < hb.results.size(); ++res_num) {
const auto& result_info = hb.results[res_num];
recv_callbacks.push_back(RecvCallback{
result_info.channel_id,
[res_num, context = context.get()](
const PjRtTransferMetadata& metadata,
std::unique_ptr<CopyToDeviceStream> stream) {
context->Receive(res_num, metadata, std::move(stream));
}});
}
return context;
}
} | #include "xla/pjrt/host_callback.h"
#include <cstring>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/tests/literal_test_util.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class TestPjRtHostMemoryForDeviceManager
: public PjRtHostMemoryForDeviceManager {
public:
~TestPjRtHostMemoryForDeviceManager() override = default;
absl::StatusOr<PjRtChunk> ToDeviceLayout(const void* src_data,
size_t src_size,
const Shape& host_shape,
const Shape& device_shape) override {
auto chunk = PjRtChunk::AllocateDefault(src_size);
std::memcpy(chunk.data(), src_data, src_size);
return chunk;
}
absl::Status ToHostLayout(const void* src_data, size_t src_size,
const Shape& src_shape, void* dst_data,
size_t dst_size, const Shape& dst_shape) override {
CHECK_EQ(src_size, dst_size);
std::memcpy(dst_data, src_data, src_size);
return absl::OkStatus();
}
};
class TestStream : public CopyToDeviceStream {
public:
TestStream(int64_t total_bytes, int64_t granule_bytes, PjRtChunk& chunk,
absl::Notification& done)
: CopyToDeviceStream(total_bytes, granule_bytes),
chunk_(chunk),
done_(done) {}
PjRtFuture<> AddChunk(PjRtChunk chunk) override {
CHECK(!done_.HasBeenNotified());
chunk_ = std::move(chunk);
done_.Notify();
return PjRtFuture<>(absl::OkStatus());
}
private:
PjRtChunk& chunk_;
absl::Notification& done_;
};
TEST(HostCallbackTest, Basic) {
HostCallback host_callback;
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
size_t byte_size = ShapeUtil::ByteSizeOf(shape);
host_callback.operands = {HostCallbackArgInfo{1, shape}};
host_callback.results = {HostCallbackArgInfo{2, shape}};
host_callback.callback = [byte_size](void** outputs, void** inputs) {
std::memcpy(outputs[0], inputs[0], byte_size);
return absl::OkStatus();
};
HostCallbackStates states;
auto& send_callbacks = states.send_callbacks.emplace_back();
auto& recv_callbacks = states.recv_callbacks.emplace_back();
TestPjRtHostMemoryForDeviceManager test_host_memory_for_device_manager;
auto context = CreateHostCallbackStateAndAppendSendRecvCallbacks(
std::move(host_callback), &test_host_memory_for_device_manager,
send_callbacks, recv_callbacks,
      /*use_major_to_minor_data_layout_for_callbacks=*/false);
PjRtTransferMetadata metadata;
metadata.device_shape = shape;
auto literal = LiteralUtil::CreateR2({{1.0f, 2.0f}, {3.0f, 4.0f}});
auto chunk = PjRtChunk::AllocateDefault(byte_size);
ASSERT_EQ(chunk.size(), literal.size_bytes());
std::memcpy(chunk.data(), literal.untyped_data(), literal.size_bytes());
TF_ASSERT_OK(context->OnSend(0, metadata, std::move(chunk)));
PjRtChunk received_chunk;
absl::Notification done;
  auto stream = std::make_unique<TestStream>(byte_size, /*granule_bytes=*/8,
                                             received_chunk, done);
context->Receive(0, metadata, std::move(stream));
done.WaitForNotification();
BorrowingLiteral borrowing_literal(
reinterpret_cast<const char*>(received_chunk.data()), shape);
EXPECT_TRUE(LiteralTestUtil::Equal(literal, borrowing_literal));
}
TEST(HostCallbackTest, NonBlockingRecv) {
HostCallback host_callback;
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
size_t byte_size = ShapeUtil::ByteSizeOf(shape);
host_callback.operands = {HostCallbackArgInfo{1, shape}};
host_callback.results = {HostCallbackArgInfo{2, shape}};
host_callback.callback = [byte_size](void** outputs, void** inputs) {
std::memcpy(outputs[0], inputs[0], byte_size);
return absl::OkStatus();
};
HostCallbackStates states;
auto& send_callbacks = states.send_callbacks.emplace_back();
auto& recv_callbacks = states.recv_callbacks.emplace_back();
TestPjRtHostMemoryForDeviceManager test_host_memory_for_device_manager;
auto context = CreateHostCallbackStateAndAppendSendRecvCallbacks(
std::move(host_callback), &test_host_memory_for_device_manager,
send_callbacks, recv_callbacks,
      /*use_major_to_minor_data_layout_for_callbacks=*/false);
PjRtTransferMetadata metadata;
metadata.device_shape = shape;
auto literal = LiteralUtil::CreateR2({{1.0f, 2.0f}, {3.0f, 4.0f}});
auto chunk = PjRtChunk::AllocateDefault(byte_size);
ASSERT_EQ(chunk.size(), literal.size_bytes());
std::memcpy(chunk.data(), literal.untyped_data(), literal.size_bytes());
absl::Notification done;
PjRtChunk received_chunk;
  auto stream = std::make_unique<TestStream>(byte_size, /*granule_bytes=*/8,
                                             received_chunk, done);
context->Receive(0, metadata, std::move(stream));
TF_ASSERT_OK(context->OnSend(0, metadata, std::move(chunk)));
done.WaitForNotification();
BorrowingLiteral borrowing_literal(
reinterpret_cast<const char*>(received_chunk.data()), shape);
EXPECT_TRUE(LiteralTestUtil::Equal(literal, borrowing_literal));
}
}
} |
1,813 | cpp | tensorflow/tensorflow | array_spec | third_party/xla/xla/python/ifrt/array_spec.cc | third_party/xla/xla/python/ifrt/array_spec_test.cc | #ifndef XLA_PYTHON_IFRT_ARRAY_SPEC_H_
#define XLA_PYTHON_IFRT_ARRAY_SPEC_H_
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/python/ifrt/array_spec.pb.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
namespace xla {
namespace ifrt {
struct ArraySpec {
DType dtype;
Shape shape;
std::shared_ptr<const Sharding> sharding;
static absl::StatusOr<ArraySpec> FromProto(
DeviceList::LookupDeviceFunc lookup_device, const ArraySpecProto& proto);
absl::StatusOr<ArraySpecProto> ToProto() const;
std::string DebugString() const;
};
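// Example round trip (an illustrative sketch; `client` and `sharding` are
// assumed to already exist, as in array_spec_test.cc):
//
//   ArraySpec spec{/*dtype=*/DType(DType::kS32), /*shape=*/Shape({4, 2}),
//                  /*sharding=*/sharding};
//   TF_ASSIGN_OR_RETURN(ArraySpecProto proto, spec.ToProto());
//   auto lookup_device = [&](DeviceId id) -> absl::StatusOr<Device*> {
//     return client->LookupDevice(id);
//   };
//   TF_ASSIGN_OR_RETURN(ArraySpec restored,
//                       ArraySpec::FromProto(lookup_device, proto));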
}  // namespace ifrt
}  // namespace xla
#endif  // XLA_PYTHON_IFRT_ARRAY_SPEC_H_
#include "xla/python/ifrt/array_spec.h"
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt/array_spec.pb.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
absl::StatusOr<ArraySpec> ArraySpec::FromProto(
DeviceList::LookupDeviceFunc lookup_device, const ArraySpecProto& proto) {
TF_ASSIGN_OR_RETURN(auto dtype, DType::FromProto(proto.dtype()));
TF_ASSIGN_OR_RETURN(auto shape, Shape::FromProto(proto.shape()));
TF_ASSIGN_OR_RETURN(auto sharding,
Sharding::FromProto(lookup_device, proto.sharding()));
return ArraySpec{dtype, std::move(shape),
std::move(sharding)};
}
absl::StatusOr<ArraySpecProto> ArraySpec::ToProto() const {
ArraySpecProto proto;
*proto.mutable_dtype() = dtype.ToProto();
*proto.mutable_shape() = shape.ToProto();
TF_ASSIGN_OR_RETURN(*proto.mutable_sharding(), sharding->ToProto());
return proto;
}
std::string ArraySpec::DebugString() const {
return absl::StrCat("ArraySpec(dtype=", dtype.DebugString(),
",shape=", shape.DebugString(),
",sharding=", sharding->DebugString(), ")");
}
}
} | #include "xla/python/ifrt/array_spec.h"
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/array_spec.pb.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/sharding_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
class ArraySpecTest : public test_util::ShardingTest {};
TEST_P(ArraySpecTest, ToFromProto) {
auto device_list = GetDevices({0, 1});
DType dtype(DType::kS32);
Shape shape({4, 2});
Shape shard_shape({2, 2});
ArraySpec spec{dtype, shape,
ConcreteEvenSharding::Create(device_list, MemoryKind(),
                                              /*shape=*/shape,
                                              /*shard_shape=*/shard_shape)};
auto lookup_device_func = [&](DeviceId device_id) -> absl::StatusOr<Device*> {
return client()->LookupDevice(device_id);
};
TF_ASSERT_OK_AND_ASSIGN(const ArraySpecProto proto, spec.ToProto());
TF_ASSERT_OK_AND_ASSIGN(const ArraySpec array_spec_copy,
ArraySpec::FromProto(lookup_device_func, proto));
EXPECT_EQ(array_spec_copy.dtype, dtype);
EXPECT_EQ(array_spec_copy.shape, shape);
const auto* sharding =
llvm::dyn_cast<ConcreteEvenSharding>(array_spec_copy.sharding.get());
ASSERT_NE(sharding, nullptr);
EXPECT_EQ(sharding->devices(), spec.sharding->devices());
EXPECT_EQ(sharding->memory_kind(), spec.sharding->memory_kind());
EXPECT_EQ(sharding->shape(), shape);
EXPECT_EQ(sharding->shard_shape(), shard_shape);
}
INSTANTIATE_TEST_SUITE_P(NumDevices, ArraySpecTest,
testing::Values(test_util::ShardingTestParam{
                             /*num_devices=*/2,
                             /*num_addressable_devices=*/2}));
}
}
} |
1,814 | cpp | tensorflow/tensorflow | serdes | third_party/xla/xla/python/ifrt/serdes.cc | third_party/xla/xla/python/ifrt/serdes_test.cc | #ifndef XLA_PYTHON_IFRT_SERDES_H_
#define XLA_PYTHON_IFRT_SERDES_H_
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
struct DeserializeOptions
: llvm::RTTIExtends<DeserializeOptions, llvm::RTTIRoot> {
static char ID;
};
class Serializable : public llvm::RTTIExtends<Serializable, llvm::RTTIRoot> {
public:
static char ID;
using DeserializeOptions = ::xla::ifrt::DeserializeOptions;
};
class SerDes : public llvm::RTTIExtends<SerDes, llvm::RTTIRoot> {
public:
virtual absl::string_view type_name() const = 0;
virtual absl::StatusOr<std::string> Serialize(Serializable& serializable) = 0;
virtual absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) = 0;
static char ID;
};
void RegisterSerDes(const void* type_id, std::unique_ptr<SerDes> serdes);
template <typename T>
void RegisterSerDes(std::unique_ptr<SerDes> serdes) {
static_assert(std::is_base_of_v<Serializable, T>,
"Types must implement `xla::ifrt::Serializable` to have a "
"serdes implementation");
RegisterSerDes(T::classID(), std::move(serdes));
}
namespace serdes_internal {
absl::StatusOr<std::unique_ptr<Serializable>> DeserializeUnchecked(
const Serialized& serialized, std::unique_ptr<DeserializeOptions> options);
}
absl::StatusOr<Serialized> Serialize(Serializable& serializable);
template <typename InterfaceType>
absl::StatusOr<std::unique_ptr<InterfaceType>> Deserialize(
const Serialized& serialized,
std::unique_ptr<typename InterfaceType::DeserializeOptions> options) {
TF_ASSIGN_OR_RETURN(auto result, serdes_internal::DeserializeUnchecked(
serialized, std::move(options)));
if (!llvm::isa<InterfaceType>(result.get())) {
return absl::InternalError(
"Unexpected Serializable type after deserialization");
}
return std::unique_ptr<InterfaceType>(
static_cast<InterfaceType*>(result.release()));
}
}
}
#endif
#include "xla/python/ifrt/serdes.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
struct Registry {
absl::Mutex mu;
absl::flat_hash_map<const void*, SerDes*> type_id_to_serdes
ABSL_GUARDED_BY(mu);
absl::flat_hash_map<absl::string_view, SerDes*> name_to_serdes
ABSL_GUARDED_BY(mu);
};
Registry* registry() {
static auto* r = new Registry();
return r;
}
}
char Serializable::ID = 0;
char DeserializeOptions::ID = 0;
char SerDes::ID = 0;
void RegisterSerDes(const void* type_id, std::unique_ptr<SerDes> serdes) {
Registry* const r = registry();
absl::MutexLock l(&r->mu);
CHECK(r->type_id_to_serdes.insert({type_id, serdes.get()}).second)
<< "xla::ifrt::SerDes cannot be registered more than once for the same "
"type id: "
<< type_id;
const absl::string_view name = serdes->type_name();
CHECK(r->name_to_serdes.insert({name, serdes.get()}).second)
<< "xla::ifrt::SerDes cannot be registered more than once for the same "
"name: "
<< name;
serdes.release();
}
absl::StatusOr<Serialized> Serialize(Serializable& serializable) {
SerDes* serdes;
{
Registry* const r = registry();
absl::MutexLock l(&r->mu);
auto it = r->type_id_to_serdes.find(serializable.dynamicClassID());
if (it == r->type_id_to_serdes.end()) {
return absl::UnimplementedError(
"Serialize call failed. Serializable has no associated SerDes "
"implementation");
}
serdes = it->second;
}
TF_ASSIGN_OR_RETURN(std::string data, serdes->Serialize(serializable));
Serialized proto;
proto.set_type_name(std::string(serdes->type_name()));
proto.set_data(std::move(data));
return proto;
}
namespace serdes_internal {
absl::StatusOr<std::unique_ptr<Serializable>> DeserializeUnchecked(
const Serialized& serialized, std::unique_ptr<DeserializeOptions> options) {
SerDes* serdes;
{
Registry* const r = registry();
absl::MutexLock l(&r->mu);
auto it = r->name_to_serdes.find(serialized.type_name());
if (it == r->name_to_serdes.end()) {
return absl::UnimplementedError(absl::StrCat(
"Deserialize call failed. Serializable has no associated SerDes ",
"implementation. type_name: ", serialized.type_name()));
}
serdes = it->second;
}
return serdes->Deserialize(serialized.data(), std::move(options));
}
}
}
} | #include "xla/python/ifrt/serdes.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::tsl::testing::StatusIs;
struct TestNumberDeserializeOptions;
struct TestNumber : llvm::RTTIExtends<TestNumber, Serializable> {
using DeserializeOptions = TestNumberDeserializeOptions;
int number;
explicit TestNumber(int number) : number(number) {}
static char ID;
};
[[maybe_unused]] char TestNumber::ID = 0;
struct TestNumberDeserializeOptions
: llvm::RTTIExtends<TestNumberDeserializeOptions, DeserializeOptions> {
absl::Status injected_failure;
static char ID;
};
[[maybe_unused]] char TestNumberDeserializeOptions::ID = 0;
class TestNumberSerDes : public llvm::RTTIExtends<TestNumberSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::TestNumber";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const TestNumber& obj = llvm::cast<TestNumber>(serializable);
return absl::StrCat(obj.number);
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
if (options != nullptr) {
auto* deserialize_options =
llvm::cast<TestNumberDeserializeOptions>(options.get());
TF_RETURN_IF_ERROR(deserialize_options->injected_failure);
}
int number;
if (!absl::SimpleAtoi(serialized, &number)) {
return absl::DataLossError("Unable to parse serialized TestNumber");
}
return std::make_unique<TestNumber>(number);
}
static char ID;
};
[[maybe_unused]] char TestNumberSerDes::ID = 0;
class TestNumberTest : public testing::Test {
protected:
static void SetUpTestSuite() {
RegisterSerDes<TestNumber>(std::make_unique<TestNumberSerDes>());
}
};
TEST_F(TestNumberTest, RoundTrip) {
auto obj = std::make_unique<TestNumber>(1234);
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*obj));
TF_ASSERT_OK_AND_ASSIGN(
auto deserialized,
Deserialize<TestNumber>(serialized, nullptr));
EXPECT_EQ(obj->number, deserialized->number);
}
TEST_F(TestNumberTest, WithOptions) {
auto obj = std::make_unique<TestNumber>(1234);
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*obj));
auto options = std::make_unique<TestNumberDeserializeOptions>();
options->injected_failure = absl::InternalError("injected failure");
EXPECT_THAT(Deserialize<TestNumber>(serialized, std::move(options)),
StatusIs(absl::StatusCode::kInternal, "injected failure"));
}
}
}
} |
1,815 | cpp | tensorflow/tensorflow | memory | third_party/xla/xla/python/ifrt/memory.cc | third_party/xla/xla/python/ifrt/memory_test.cc | #ifndef XLA_PYTHON_IFRT_MEMORY_H_
#define XLA_PYTHON_IFRT_MEMORY_H_
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/device.h"
namespace xla {
namespace ifrt {
class MemoryKind {
public:
MemoryKind() = default;
explicit MemoryKind(std::optional<absl::string_view> memory_kind);
bool operator==(const MemoryKind& other) const {
if (!memory_kind_.has_value() && !other.memory_kind_.has_value()) {
return true;
}
if (memory_kind_.has_value() && other.memory_kind_.has_value() &&
memory_kind_->data() == other.memory_kind_->data()) {
return true;
}
return false;
}
bool operator!=(const MemoryKind& other) const { return !(*this == other); }
template <typename H>
friend H AbslHashValue(H h, const MemoryKind& memory_kind) {
return H::combine(std::move(h), memory_kind.memory_kind_);
}
std::optional<absl::string_view> memory_kind() const { return memory_kind_; }
std::string DebugString() const;
private:
std::optional<absl::string_view> memory_kind_;
};
MemoryKind CanonicalizeMemoryKind(MemoryKind memory_kind, Device* device);
TSL_LIB_GTL_DEFINE_INT_TYPE(MemoryId, int32_t);
class Memory : public llvm::RTTIExtends<Memory, llvm::RTTIRoot> {
public:
Memory() = default;
Memory(const Memory&) = delete;
Memory(Memory&&) = delete;
Memory& operator=(const Memory&) = delete;
Memory& operator=(Memory&&) = delete;
virtual MemoryId Id() const = 0;
virtual const MemoryKind& Kind() const = 0;
virtual absl::string_view ToString() const = 0;
virtual absl::string_view DebugString() const = 0;
virtual absl::Span<Device* const> Devices() const = 0;
static char ID;
};
}
}
#endif
#include "xla/python/ifrt/memory.h"
#include <optional>
#include <string>
#include <utility>
#include "absl/container/node_hash_set.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/python/ifrt/device.h"
namespace xla {
namespace ifrt {
namespace {
struct MemoryKindsSet {
absl::Mutex mu;
absl::node_hash_set<std::string> memory_kinds_set ABSL_GUARDED_BY(mu);
};
}
MemoryKind::MemoryKind(std::optional<absl::string_view> memory_kind) {
static auto* const global_set = new MemoryKindsSet();
if (!memory_kind.has_value()) {
return;
}
absl::MutexLock lock(&global_set->mu);
auto it = global_set->memory_kinds_set.find(*memory_kind);
if (it == global_set->memory_kinds_set.end()) {
memory_kind_ =
*global_set->memory_kinds_set.insert(std::string(*memory_kind)).first;
} else {
memory_kind_ = *it;
}
}
std::string MemoryKind::DebugString() const {
if (memory_kind_.has_value()) {
return std::string(*memory_kind_);
}
return "(default)";
}
MemoryKind CanonicalizeMemoryKind(MemoryKind memory_kind, Device* device) {
if (memory_kind.memory_kind().has_value()) {
return memory_kind;
}
auto default_memory = device->DefaultMemory();
if (default_memory.ok()) {
return (*default_memory)->Kind();
}
return MemoryKind();
}
char Memory::ID = 0;
}
} | #include "xla/python/ifrt/memory.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
using ::testing::Optional;
namespace xla {
namespace ifrt {
namespace {
TEST(MemoryKindTest, EqualityForUnspecified) {
MemoryKind memory_kind1;
MemoryKind memory_kind2;
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, EqualityForSameString) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2("abc");
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, EqualityForSameStringContent) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2(absl::StrCat("ab", "c"));
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, InequalityForDifferentStringContent) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2("def");
EXPECT_NE(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, InequalityBetweenSpecifiedAndUnspecified) {
{
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2;
EXPECT_NE(memory_kind1, memory_kind2);
}
{
MemoryKind memory_kind1;
MemoryKind memory_kind2("abc");
EXPECT_NE(memory_kind1, memory_kind2);
}
}
TEST(MemoryKindTest, MemorySafety) {
auto memory_kind_str = std::make_unique<std::string>("abc");
MemoryKind memory_kind(*memory_kind_str);
memory_kind_str.reset();
EXPECT_THAT(memory_kind.memory_kind(), Optional(absl::string_view("abc")));
}
TEST(MemoryKindTest, EqualityForUnspecifiedAndNullopt) {
MemoryKind memory_kind1;
MemoryKind memory_kind2(std::nullopt);
EXPECT_EQ(memory_kind1, memory_kind2);
}
}
}
} |
1,816 | cpp | tensorflow/tensorflow | index_domain | third_party/xla/xla/python/ifrt/index_domain.cc | third_party/xla/xla/python/ifrt/index_domain_test.cc | #ifndef XLA_PYTHON_IFRT_INDEX_DOMAIN_H_
#define XLA_PYTHON_IFRT_INDEX_DOMAIN_H_
#include <cstdint>
#include <ostream>
#include <string>
#include <utility>
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/shape.h"
namespace xla {
namespace ifrt {
class IndexDomain {
public:
IndexDomain(Index origin, Shape shape)
: origin_(std::move(origin)), shape_(std::move(shape)) {}
explicit IndexDomain(Shape shape)
: origin_(Index::Zeros(shape.dims().size())), shape_(std::move(shape)) {}
IndexDomain(const IndexDomain&) = default;
IndexDomain(IndexDomain&&) = default;
IndexDomain& operator=(const IndexDomain&) = default;
IndexDomain& operator=(IndexDomain&&) = default;
const Index& origin() const { return origin_; }
const Shape& shape() const { return shape_; }
bool operator==(const IndexDomain& other) const {
return origin_ == other.origin_ && shape_ == other.shape_;
}
bool operator!=(const IndexDomain& other) const {
return origin_ != other.origin_ || shape_ != other.shape_;
}
IndexDomain operator+(const Index& offset) const {
return IndexDomain(origin_ + offset, shape_);
}
IndexDomain operator-(const Index& offset) const {
return IndexDomain(origin_ - offset, shape_);
}
IndexDomain& operator+=(const Index& offset) {
origin_ += offset;
return *this;
}
IndexDomain& operator-=(const Index& offset) {
origin_ -= offset;
return *this;
}
std::string DebugString() const;
private:
Index origin_;
Shape shape_;
};
std::ostream& operator<<(std::ostream& os, const IndexDomain& index_domain);
}
}
#endif
#include "xla/python/ifrt/index_domain.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace xla {
namespace ifrt {
std::string IndexDomain::DebugString() const {
return absl::StrCat("IndexDomain(origin=", origin_.DebugString(),
",shape=", shape_.DebugString(), ")");
}
std::ostream& operator<<(std::ostream& os, const IndexDomain& index_domain) {
return os << index_domain.DebugString();
}
}
} | #include "xla/python/ifrt/index_domain.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
namespace xla {
namespace ifrt {
namespace {
TEST(IndexDomainTest, Construction) {
IndexDomain a(Index({1, 2}), Shape({3, 4}));
EXPECT_EQ(a.origin(), Index({1, 2}));
EXPECT_EQ(a.shape(), Shape({3, 4}));
IndexDomain b(Shape({3, 4}));
EXPECT_EQ(b.origin(), Index({0, 0}));
EXPECT_EQ(b.shape(), Shape({3, 4}));
}
TEST(IndexDomainTest, Operations) {
IndexDomain a(Index({1, 2}), Shape({3, 4}));
Index b({1, 2});
EXPECT_EQ(a + b, IndexDomain(Index({2, 4}), Shape({3, 4})));
{
IndexDomain c = a;
EXPECT_EQ(c += b, IndexDomain(Index({2, 4}), Shape({3, 4})));
}
EXPECT_EQ(a - b, IndexDomain(Index({0, 0}), Shape({3, 4})));
{
IndexDomain c = a;
EXPECT_EQ(c -= b, IndexDomain(Index({0, 0}), Shape({3, 4})));
}
}
}
}
} |
1,817 | cpp | tensorflow/tensorflow | dtype | third_party/xla/xla/python/ifrt/dtype.cc | third_party/xla/xla/python/ifrt/dtype_test.cc | #ifndef XLA_PYTHON_IFRT_DTYPE_H_
#define XLA_PYTHON_IFRT_DTYPE_H_
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/statusor.h"
#include "xla/python/ifrt/dtype.pb.h"
namespace xla {
namespace ifrt {
class DType {
public:
enum Kind {
kInvalid = 0,
kPred = 1,
kS2 = 26,
kS4 = 21,
kS8 = 2,
kS16 = 3,
kS32 = 4,
kS64 = 5,
kU2 = 27,
kU4 = 22,
kU8 = 6,
kU16 = 7,
kU32 = 8,
kU64 = 9,
kF16 = 10,
kF32 = 11,
kF64 = 12,
kBF16 = 16,
kC64 = 15,
kC128 = 18,
kToken = 17,
kF8E4M3FN = 20,
kF8E4M3B11FNUZ = 23,
kF8E4M3FNUZ = 25,
kF8E5M2 = 19,
kF8E5M2FNUZ = 24,
kString = 99,
};
explicit DType(Kind kind) : kind_(kind) {}
DType(const DType&) = default;
DType(DType&&) = default;
DType& operator=(const DType&) = default;
DType& operator=(DType&&) = default;
Kind kind() const { return kind_; }
bool operator==(const DType& other) const { return kind_ == other.kind_; }
bool operator!=(const DType& other) const { return kind_ != other.kind_; }
template <typename H>
friend H AbslHashValue(H h, const DType& value) {
return H::combine(std::move(h), value.kind());
}
std::optional<int> byte_size() const;
std::optional<int> bit_size() const;
static absl::StatusOr<DType> FromProto(const DTypeProto& proto);
DTypeProto ToProto() const;
std::string DebugString() const;
private:
Kind kind_;
};
std::ostream& operator<<(std::ostream& os, const DType& dtype);
}
}
#endif
#include "xla/python/ifrt/dtype.h"
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt/dtype.pb.h"
namespace xla {
namespace ifrt {
std::optional<int> DType::byte_size() const {
switch (kind_) {
case kPred:
case kS8:
case kU8:
return 1;
case kS16:
case kU16:
case kF16:
case kBF16:
return 2;
case kS32:
case kU32:
case kF32:
return 4;
case kS64:
case kU64:
case kF64:
case kC64:
return 8;
case kC128:
return 16;
default:
return std::nullopt;
}
}
std::optional<int> DType::bit_size() const {
switch (kind_) {
case kPred:
case kS8:
case kU8:
return 8;
case kS16:
case kU16:
case kF16:
case kBF16:
return 16;
case kS32:
case kU32:
case kF32:
return 32;
case kS64:
case kU64:
case kF64:
case kC64:
return 64;
case kC128:
return 128;
default:
return std::nullopt;
}
}
absl::StatusOr<DType> DType::FromProto(const DTypeProto& dtype_proto) {
switch (dtype_proto.kind()) {
case DTypeProto::KIND_PRED:
return DType(DType::Kind::kPred);
case DTypeProto::KIND_TOKEN:
return DType(DType::Kind::kToken);
#define CASE(X) \
case DTypeProto::KIND_##X: \
return DType(DType::Kind::k##X);
CASE(S4);
CASE(S8);
CASE(S16);
CASE(S32);
CASE(S64);
CASE(U4);
CASE(U8);
CASE(U16);
CASE(U32);
CASE(U64);
CASE(F16);
CASE(F32);
CASE(F64);
CASE(BF16);
CASE(C64);
CASE(C128);
CASE(F8E4M3FN);
CASE(F8E4M3B11FNUZ);
CASE(F8E4M3FNUZ);
CASE(F8E5M2);
CASE(F8E5M2FNUZ);
#undef CASE
case DTypeProto::KIND_STRING:
return DType(DType::Kind::kString);
default:
return DType(DType::Kind::kInvalid);
}
}
DTypeProto DType::ToProto() const {
DTypeProto dtype_proto;
switch (kind()) {
case DType::Kind::kPred:
dtype_proto.set_kind(DTypeProto::KIND_PRED);
break;
case DType::Kind::kToken:
dtype_proto.set_kind(DTypeProto::KIND_TOKEN);
break;
#define CASE(X) \
case DType::Kind::k##X: \
dtype_proto.set_kind(DTypeProto::KIND_##X); \
break;
CASE(S4);
CASE(S8);
CASE(S16);
CASE(S32);
CASE(S64);
CASE(U4);
CASE(U8);
CASE(U16);
CASE(U32);
CASE(U64);
CASE(F16);
CASE(F32);
CASE(F64);
CASE(BF16);
CASE(C64);
CASE(C128);
CASE(F8E4M3FN);
CASE(F8E4M3B11FNUZ);
CASE(F8E4M3FNUZ);
CASE(F8E5M2);
CASE(F8E5M2FNUZ);
#undef CASE
case DType::Kind::kString:
dtype_proto.set_kind(DTypeProto::KIND_STRING);
break;
default:
dtype_proto.set_kind(DTypeProto::KIND_UNSPECIFIED);
break;
}
return dtype_proto;
}
std::string DType::DebugString() const {
switch (kind_) {
case kInvalid:
return "INVALID";
case kPred:
return "PRED";
case kS8:
return "S8";
case kS16:
return "S16";
case kS32:
return "S32";
case kS64:
return "S64";
case kU8:
return "U8";
case kU16:
return "U16";
case kU32:
return "U32";
case kU64:
return "U64";
case kF16:
return "F16";
case kF32:
return "F32";
case kF64:
return "F64";
case kBF16:
return "BF16";
case kC64:
return "C64";
case kC128:
return "C128";
case kToken:
return "TOKEN";
case kString:
return "STRING";
default:
return absl::StrCat("UNKNOWN(", static_cast<int>(kind_), ")");
}
}
std::ostream& operator<<(std::ostream& os, const DType& dtype) {
return os << dtype.DebugString();
}
}
} | #include "xla/python/ifrt/dtype.h"
#include <gtest/gtest.h>
#include "xla/python/ifrt/dtype.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
TEST(DTypeTest, FromToFromProto) {
for (int i = 0; i < DTypeProto::Kind_descriptor()->value_count(); ++i) {
DTypeProto proto;
proto.set_kind(static_cast<DTypeProto::Kind>(
DTypeProto::Kind_descriptor()->value(i)->number()));
TF_ASSERT_OK_AND_ASSIGN(DType dtype, DType::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(DType dtype_copy,
DType::FromProto(dtype.ToProto()));
EXPECT_EQ(dtype_copy, dtype);
}
}
}
}
} |
1,818 | cpp | tensorflow/tensorflow | remap_plan | third_party/xla/xla/python/ifrt/remap_plan.cc | third_party/xla/xla/python/ifrt/remap_plan_test.cc | #ifndef XLA_PYTHON_IFRT_REMAP_PLAN_H_
#define XLA_PYTHON_IFRT_REMAP_PLAN_H_
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/remap_plan.pb.h"
namespace xla {
namespace ifrt {
struct RemapPlan {
struct Interval {
int64_t start;
int64_t end;
int64_t step;
bool operator==(const Interval& other) const {
return start == other.start && end == other.end && step == other.step;
}
std::string DebugString() const;
};
struct Mapping {
int in_array;
int out_array;
std::vector<Interval> from;
std::vector<Interval> to;
bool operator==(const Mapping& other) const {
return in_array == other.in_array && out_array == other.out_array &&
from == other.from && to == other.to;
}
std::string DebugString() const;
};
std::vector<ArraySpec> input_specs;
std::vector<ArraySpec> output_specs;
std::shared_ptr<std::vector<Mapping>> mappings;
absl::Status Validate() const;
static absl::StatusOr<RemapPlan> FromProto(
DeviceList::LookupDeviceFunc lookup_device, const RemapPlanProto& proto);
absl::StatusOr<RemapPlanProto> ToProto() const;
std::string DebugString() const;
};
}
}
#endif
#include "xla/python/ifrt/remap_plan.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/remap_plan.pb.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
absl::StatusOr<RemapPlan::Mapping> MappingFromProto(
const RemapPlanProto::MappingProto& mapping_proto) {
RemapPlan::Mapping mapping;
mapping.in_array = mapping_proto.in_array();
mapping.out_array = mapping_proto.out_array();
const int64_t num_intervals = mapping_proto.from_start_size();
TF_RET_CHECK(mapping_proto.from_end_size() == num_intervals);
TF_RET_CHECK(mapping_proto.from_step_size() == num_intervals);
TF_RET_CHECK(mapping_proto.to_start_size() == num_intervals);
TF_RET_CHECK(mapping_proto.to_end_size() == num_intervals);
TF_RET_CHECK(mapping_proto.to_step_size() == num_intervals);
mapping.from.reserve(num_intervals);
mapping.to.reserve(num_intervals);
for (int64_t i = 0; i < num_intervals; ++i) {
mapping.from.push_back(
RemapPlan::Interval{mapping_proto.from_start(i),
mapping_proto.from_end(i),
mapping_proto.from_step(i)});
mapping.to.push_back(
RemapPlan::Interval{mapping_proto.to_start(i),
mapping_proto.to_end(i),
mapping_proto.to_step(i)});
}
return mapping;
}
absl::StatusOr<RemapPlanProto::MappingProto> MappingToProto(
const RemapPlan::Mapping& mapping) {
TF_RET_CHECK(mapping.from.size() == mapping.to.size());
RemapPlanProto::MappingProto proto;
proto.set_in_array(mapping.in_array);
proto.set_out_array(mapping.out_array);
const int64_t num_intervals = mapping.from.size();
proto.mutable_from_start()->Reserve(num_intervals);
proto.mutable_from_end()->Reserve(num_intervals);
proto.mutable_from_step()->Reserve(num_intervals);
proto.mutable_to_start()->Reserve(num_intervals);
proto.mutable_to_end()->Reserve(num_intervals);
proto.mutable_to_step()->Reserve(num_intervals);
for (int64_t i = 0; i < mapping.from.size(); ++i) {
proto.add_from_start(mapping.from[i].start);
proto.add_from_end(mapping.from[i].end);
proto.add_from_step(mapping.from[i].step);
proto.add_to_start(mapping.to[i].start);
proto.add_to_end(mapping.to[i].end);
proto.add_to_step(mapping.to[i].step);
}
return proto;
}
absl::Status CheckRange(int64_t num_shards,
const RemapPlan::Interval& interval) {
if (interval.start < 0 || interval.start > num_shards - 1) {
return InvalidArgument("start must be in [0, %d], but is %d",
num_shards - 1, interval.start);
}
if (interval.end < 0 || interval.end > num_shards) {
return InvalidArgument("end must be in [0, %d], but is %d", num_shards,
interval.end);
}
if (interval.step <= 0) {
return InvalidArgument("step must be positive, but is %d", interval.step);
}
return absl::OkStatus();
}
int64_t GetNumberOfSteps(const RemapPlan::Interval& interval) {
return (interval.end - interval.start + interval.step - 1) / interval.step;
}
}
std::string RemapPlan::Interval::DebugString() const {
return absl::StrCat("[", start, ":", end, ":", step, "]");
}
std::string RemapPlan::Mapping::DebugString() const {
auto format_intervals = [](absl::Span<const RemapPlan::Interval> intervals) {
return absl::StrCat(
"[",
absl::StrJoin(
intervals, ",",
[](std::string* out, const RemapPlan::Interval& interval) {
absl::StrAppend(out, interval.DebugString());
}),
"]");
};
return absl::StrCat("Mapping(in_array=", in_array, ",",
"out_array=", out_array, ",from=", format_intervals(from),
",to=", format_intervals(to), ")");
}
absl::Status RemapPlan::Validate() const {
const int num_inputs = input_specs.size();
if (num_inputs == 0) {
return InvalidArgument("Must have at least one input");
}
for (int i = 0; i < num_inputs; ++i) {
if (input_specs[i].dtype != input_specs.front().dtype) {
return InvalidArgument(
"Input must have the same dtype: %s (input 0) vs. %s (input "
"%d)",
input_specs.front().dtype.DebugString(),
input_specs[i].dtype.DebugString(), i);
}
}
const int num_outputs = output_specs.size();
for (int i = 0; i < num_outputs; ++i) {
if (output_specs[i].dtype != input_specs.front().dtype) {
return InvalidArgument(
"Input and output must have the same dtype: %s (input 0) vs. %s "
"(output %d)",
          input_specs.front().dtype.DebugString(),
output_specs[i].dtype.DebugString(), i);
}
}
std::vector<std::vector<bool>> in_used_buffers_list(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
in_used_buffers_list[i].resize(
input_specs[i].sharding->devices().size(),
false);
}
std::vector<DeviceList::Devices> out_assigned_devices_list(num_outputs);
for (int i = 0; i < num_outputs; ++i) {
out_assigned_devices_list[i].resize(
output_specs[i].sharding->devices().size(),
nullptr);
}
for (int64_t i = 0; i < mappings->size(); ++i) {
const RemapPlan::Mapping& mapping = (*mappings)[i];
if (mapping.in_array < 0 || mapping.in_array >= num_inputs) {
return InvalidArgument(
"mappings[%d].in_array must be in [0, %d], but is %d", i,
num_inputs - 1, mapping.in_array);
}
if (mapping.out_array < 0 || mapping.out_array >= num_outputs) {
return InvalidArgument(
"mappings[%d].out_array must be in [0, %d], but is %d", i,
num_outputs - 1, mapping.out_array);
}
if (mapping.from.size() != mapping.to.size()) {
return InvalidArgument(
"mappings[%d].from and mappings[%d].to must have the same number of "
"intervals, but has %d and %d intervals",
i, i, mapping.from.size(), mapping.to.size());
}
std::vector<bool>& in_used_buffers = in_used_buffers_list[mapping.in_array];
const DeviceList& in_devices =
input_specs[mapping.in_array].sharding->devices();
DeviceList::Devices& out_assigned_devices =
out_assigned_devices_list[mapping.out_array];
const int64_t in_shards_count = in_used_buffers.size();
const int64_t out_shards_count = out_assigned_devices.size();
for (int s = 0; s < mapping.from.size(); ++s) {
const RemapPlan::Interval& in_interval = mapping.from[s];
const RemapPlan::Interval& out_interval = mapping.to[s];
TF_RETURN_IF_ERROR(CheckRange(in_shards_count, in_interval));
TF_RETURN_IF_ERROR(CheckRange(out_shards_count, out_interval));
if (GetNumberOfSteps(in_interval) != GetNumberOfSteps(out_interval)) {
return InvalidArgument(
"mappings[%d].from[%d] and mappings[%d].to[%d] must have the same "
"number of steps, but were %d and %d "
"(%s vs. %s)",
i, s, i, s, GetNumberOfSteps(in_interval),
GetNumberOfSteps(out_interval), in_interval.DebugString(),
out_interval.DebugString());
}
int64_t in_shard = in_interval.start;
int64_t out_shard = out_interval.start;
while (in_shard < in_interval.end) {
if (in_used_buffers[in_shard]) {
return InvalidArgument("Input array %d shard %d is already used",
mapping.in_array, in_shard);
}
in_used_buffers[in_shard] = true;
if (out_assigned_devices[out_shard] != nullptr) {
return InvalidArgument("Output array %d shard %d is already assigned",
mapping.out_array, out_shard);
}
out_assigned_devices[out_shard] = in_devices[in_shard];
in_shard += in_interval.step;
out_shard += out_interval.step;
}
}
}
for (int i = 0; i < num_outputs; ++i) {
for (int out_shard = 0;
out_shard < output_specs[i].sharding->devices().size(); ++out_shard) {
if (out_assigned_devices_list[i][out_shard] == nullptr) {
return InvalidArgument("Output array %d shard %d is unassigned", i,
out_shard);
}
}
if (out_assigned_devices_list[i] !=
output_specs[i].sharding->devices().devices()) {
return InvalidArgument(
"Output array %d devices and sharding devices do not match: "
"Expected %s, but got %s",
i, output_specs[i].sharding->devices().DebugString(),
DeviceList(std::move(out_assigned_devices_list[i])).DebugString());
}
}
return absl::OkStatus();
}
absl::StatusOr<RemapPlan> RemapPlan::FromProto(
DeviceList::LookupDeviceFunc lookup_device, const RemapPlanProto& proto) {
RemapPlan plan;
plan.input_specs.reserve(proto.input_specs_size());
for (const auto& input_spec_proto : proto.input_specs()) {
TF_ASSIGN_OR_RETURN(ArraySpec input_spec,
ArraySpec::FromProto(lookup_device, input_spec_proto));
plan.input_specs.push_back(std::move(input_spec));
}
plan.output_specs.reserve(proto.output_specs_size());
for (const auto& output_spec_proto : proto.output_specs()) {
TF_ASSIGN_OR_RETURN(ArraySpec output_spec,
ArraySpec::FromProto(lookup_device, output_spec_proto));
plan.output_specs.push_back(std::move(output_spec));
}
plan.mappings = std::make_shared<std::vector<Mapping>>();
plan.mappings->reserve(proto.mappings_size());
for (const auto& mapping_proto : proto.mappings()) {
TF_ASSIGN_OR_RETURN(auto mapping, MappingFromProto(mapping_proto));
plan.mappings->push_back(std::move(mapping));
}
return plan;
}
absl::StatusOr<RemapPlanProto> RemapPlan::ToProto() const {
RemapPlanProto proto;
proto.mutable_input_specs()->Reserve(input_specs.size());
for (const auto& input_spec : input_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_input_specs(), input_spec.ToProto());
}
proto.mutable_output_specs()->Reserve(output_specs.size());
for (const auto& output_spec : output_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_output_specs(), output_spec.ToProto());
}
proto.mutable_mappings()->Reserve(mappings->size());
for (const auto& mapping : *mappings) {
TF_ASSIGN_OR_RETURN(*proto.add_mappings(), MappingToProto(mapping));
}
return proto;
}
std::string RemapPlan::DebugString() const {
auto format_array_specs = [](absl::Span<const ArraySpec> array_specs) {
return absl::StrCat(
"[",
absl::StrJoin(array_specs, ",",
[](std::string* out, const ArraySpec& spec) {
absl::StrAppend(out, spec.DebugString());
}),
"]");
};
auto format_mappings = [](absl::Span<const Mapping> mappings) {
return absl::StrCat(
"[",
absl::StrJoin(mappings, ",",
[](std::string* out, const Mapping& mapping) {
absl::StrAppend(out, mapping.DebugString());
}),
"]");
};
return absl::StrCat(
"RemapPlan(output_specs=", format_array_specs(output_specs), ",",
"mappings=", format_mappings(*mappings), ")");
}
}
} | #include "xla/python/ifrt/remap_plan.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/sharding_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::StatusIs;
class RemapPlanTest : public test_util::ShardingTest {};
TEST_P(RemapPlanTest, ToFromProto) {
RemapPlan plan;
Shape shape({20, 20});
Shape shard_shape({5, 20});
DeviceList devices = GetDevices({0, 1, 2, 3});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(devices, MemoryKind(), shape,
shard_shape);
plan.input_specs.reserve(2);
plan.input_specs.push_back(ArraySpec{DType(DType::kF32),
shape, sharding});
plan.input_specs.push_back(ArraySpec{DType(DType::kF32),
shape, sharding});
plan.output_specs.reserve(2);
plan.output_specs.push_back(ArraySpec{
DType(DType::kF32), shape, sharding});
plan.output_specs.push_back(ArraySpec{
DType(DType::kF32), shape, sharding});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->reserve(2);
plan.mappings->push_back(RemapPlan::Mapping{
0, 1,
{RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{2, 4, 1}},
{RemapPlan::Interval{1, 4, 2}, RemapPlan::Interval{0, 4, 2}}});
plan.mappings->push_back(RemapPlan::Mapping{
1, 0,
{RemapPlan::Interval{0, 4, 2}, RemapPlan::Interval{1, 4, 2}},
{RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{2, 4, 1}}});
TF_ASSERT_OK_AND_ASSIGN(RemapPlanProto plan_proto, plan.ToProto());
TF_ASSERT_OK_AND_ASSIGN(
RemapPlan plan_copy,
RemapPlan::FromProto(absl::bind_front(&Client::LookupDevice, client()),
plan_proto));
EXPECT_THAT(*plan_copy.mappings, ElementsAreArray(*plan.mappings));
EXPECT_THAT(plan_copy.output_specs, SizeIs(2));
for (const auto& spec : plan_copy.input_specs) {
EXPECT_EQ(spec.dtype, DType(DType::kF32));
EXPECT_EQ(spec.shape, shape);
const auto* sharding_copy =
llvm::dyn_cast<ConcreteEvenSharding>(spec.sharding.get());
ASSERT_NE(sharding_copy, nullptr);
EXPECT_EQ(sharding_copy->devices(), devices);
EXPECT_EQ(sharding_copy->shape(), shape);
EXPECT_EQ(sharding_copy->shard_shape(), shard_shape);
}
for (const auto& spec : plan_copy.output_specs) {
EXPECT_EQ(spec.dtype, DType(DType::kF32));
EXPECT_EQ(spec.shape, shape);
const auto* sharding_copy =
llvm::dyn_cast<ConcreteEvenSharding>(spec.sharding.get());
ASSERT_NE(sharding_copy, nullptr);
EXPECT_EQ(sharding_copy->devices(), devices);
EXPECT_EQ(sharding_copy->shape(), shape);
EXPECT_EQ(sharding_copy->shard_shape(), shard_shape);
}
}
TEST_P(RemapPlanTest, InvalidInputDtype) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.input_specs.push_back(
ArraySpec{DType(DType::kF32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input must have the same dtype")));
}
TEST_P(RemapPlanTest, InvalidOutputDtype) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kF32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input and output must have the same dtype")));
}
TEST_P(RemapPlanTest, InvalidInputArrayIndex) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{1,
0,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].in_array must be in [0, 0], but is 1")));
}
TEST_P(RemapPlanTest, InvalidOutputArrayIndex) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
1,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].out_array must be in [0, 0], but is 1")));
}
TEST_P(RemapPlanTest, InvalidIntervalCount) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].from and mappings[0].to must have the same "
"number of intervals, but has 2 and 1 intervals")));
}
TEST_P(RemapPlanTest, InvalidShardIndex) {
auto run = [&](RemapPlan::Interval from, RemapPlan::Interval to) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{0, 0,
{from},
{to}});
return plan.Validate();
};
EXPECT_THAT(run(RemapPlan::Interval{-1, 1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{1, 1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is 1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{-1, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is 1")));
EXPECT_THAT(run(RemapPlan::Interval{0, -1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is 2")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, -1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 2, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is 2")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 0}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("step must be positive, but is 0")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, -1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("step must be positive, but is -1")));
}
TEST_P(RemapPlanTest, AlreadyUsedInputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 2, 1}}});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input array 0 shard 0 is already used")));
}
TEST_P(RemapPlanTest, UnassignedOutputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
0,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output array 0 shard 1 is unassigned")));
}
TEST_P(RemapPlanTest, AlreadyAssignedOutputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 2, 1}},
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output array 0 shard 0 is already assigned")));
}
TEST_P(RemapPlanTest, InvalidOutputDevices) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({1, 0}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
0,
{RemapPlan::Interval{0, 2, 1}},
{RemapPlan::Interval{0, 2, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"Output array 0 devices and sharding devices do not match")));
}
INSTANTIATE_TEST_SUITE_P(NumDevices, RemapPlanTest,
testing::Values(test_util::ShardingTestParam{
4,
4}));
}
}
} |
1,819 | cpp | tensorflow/tensorflow | index | third_party/xla/xla/python/ifrt/index.cc | third_party/xla/xla/python/ifrt/index_test.cc | #ifndef XLA_PYTHON_IFRT_INDEX_H_
#define XLA_PYTHON_IFRT_INDEX_H_
#include <cstdint>
#include <ostream>
#include <string>
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace ifrt {
class Index {
public:
static constexpr int kInlineElementSize = 6;
using Elements = absl::InlinedVector<int64_t, kInlineElementSize>;
explicit Index(absl::Span<const int64_t> elements)
: elements_(Elements(elements.begin(), elements.end())) {}
static Index Zeros(int num_elements) {
return Index(Elements(num_elements));
}
Index(const Index&) = default;
Index(Index&&) = default;
Index& operator=(const Index&) = default;
Index& operator=(Index&&) = default;
absl::Span<const int64_t> elements() const { return elements_; }
bool operator==(const Index& other) const {
return elements_ == other.elements_;
}
bool operator!=(const Index& other) const {
return elements_ != other.elements_;
}
Index operator+(const Index& offset) const {
CHECK_EQ(elements_.size(), offset.elements_.size());
Index result = *this;
for (int i = 0; i < elements_.size(); ++i) {
result.elements_[i] += offset.elements_[i];
}
return result;
}
Index operator-(const Index& offset) const {
CHECK_EQ(elements_.size(), offset.elements_.size());
Index result = *this;
for (int i = 0; i < elements_.size(); ++i) {
result.elements_[i] -= offset.elements_[i];
}
return result;
}
Index operator*(absl::Span<const int64_t> multiplier) const {
CHECK_EQ(elements_.size(), multiplier.size());
Index result = *this;
for (int i = 0; i < elements_.size(); ++i) {
result.elements_[i] *= multiplier[i];
}
return result;
}
Index& operator+=(const Index& offset) { return *this = *this + offset; }
Index& operator-=(const Index& offset) { return *this = *this - offset; }
Index& operator*=(absl::Span<const int64_t> multiplier) {
return *this = *this * multiplier;
}
std::string DebugString() const;
private:
Elements elements_;
};
std::ostream& operator<<(std::ostream& os, const Index& index);
}
}
#endif
#include "xla/python/ifrt/index.h"
#include <ostream>
#include <string>
#include "absl/strings/str_join.h"
namespace xla {
namespace ifrt {
std::string Index::DebugString() const {
return absl::StrCat("[", absl::StrJoin(elements_, ","), "]");
}
std::ostream& operator<<(std::ostream& os, const Index& index) {
return os << index.DebugString();
}
}
} | #include "xla/python/ifrt/index.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
TEST(IndexTest, Construction) {
EXPECT_THAT(Index({1, 2}).elements(), ElementsAre(1, 2));
EXPECT_THAT(Index::Zeros(2).elements(), ElementsAre(0, 0));
}
TEST(IndexTest, Operations) {
EXPECT_EQ(Index({1, 2}), Index({1, 2}));
EXPECT_NE(Index({1, 2}), Index({1, 3}));
Index a({11, 22});
Index b({2, 3});
EXPECT_EQ(a + b, Index({13, 25}));
{
Index c = a;
EXPECT_EQ(c += b, Index({13, 25}));
}
EXPECT_EQ(a - b, Index({9, 19}));
{
Index c = a;
EXPECT_EQ(c -= b, Index({9, 19}));
}
EXPECT_EQ(a * std::vector<int64_t>({1, 2}), Index({11, 44}));
{
Index c = a;
EXPECT_EQ(c *= std::vector<int64_t>({1, 2}), Index({11, 44}));
}
}
}
}
} |
1,820 | cpp | tensorflow/tensorflow | tuple | third_party/xla/xla/client/lib/tuple.cc | third_party/xla/xla/client/lib/tuple_test.cc | #ifndef XLA_CLIENT_LIB_TUPLE_H_
#define XLA_CLIENT_LIB_TUPLE_H_
#include "absl/status/statusor.h"
#include "xla/client/xla_builder.h"
#include "xla/shape_tree.h"
namespace xla {
absl::StatusOr<ShapeTree<XlaOp>> DisassembleTuple(XlaOp tuple);
XlaOp AssembleTuple(XlaBuilder* builder, ShapeTree<XlaOp> elements);
}
#endif
#include "xla/client/lib/tuple.h"
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "xla/client/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<ShapeTree<XlaOp>> DisassembleTuple(XlaOp tuple) {
TF_ASSIGN_OR_RETURN(Shape shape, tuple.builder()->GetShape(tuple));
ShapeTree<XlaOp> result(shape);
result.ForEachMutableElement([&](ShapeIndexView index, XlaOp* element) {
if (index.empty()) {
*element = tuple;
} else {
ShapeIndexView parent_index = index.subspan(0, index.size() - 1);
XlaOp parent = result.element(parent_index);
*element = GetTupleElement(parent, index.back());
}
});
return std::move(result);
}
XlaOp AssembleTuple(XlaBuilder* builder, ShapeTree<XlaOp> elements) {
elements.ForEachMutableElementPostOrder(
[&](const ShapeIndex& index, XlaOp* element) {
const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index);
if (subshape.IsTuple()) {
absl::InlinedVector<XlaOp, 2> children;
ShapeIndex child_index = index;
for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
child_index.push_back(i);
children.push_back(elements.element(child_index));
child_index.pop_back();
}
*element = Tuple(builder, children);
}
});
return elements.element({});
}
} | #include "xla/client/lib/tuple.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "xla/client/global_data.h"
#include "xla/client/xla_builder.h"
#include "xla/error_spec.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TupleTest : public ClientLibraryTestBase {};
XLA_TEST_F(TupleTest, DisassembleAssemble) {
XlaBuilder builder(TestName());
Shape shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(S32, {3}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {4}), ShapeUtil::MakeShape(S32, {5})}),
ShapeUtil::MakeShape(S32, {6}),
});
Literal input = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({3}, int32_t{42}),
LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({4}, int32_t{43}),
LiteralUtil::CreateFullWithDescendingLayout({5}, int32_t{44})),
LiteralUtil::CreateFullWithDescendingLayout({6}, int32_t{45}));
XlaOp param = Parameter(&builder, 0, shape, "param");
TF_ASSERT_OK_AND_ASSIGN(ShapeTree<XlaOp> disassembled_tuple,
DisassembleTuple(param));
int32_t addend = 1;
disassembled_tuple.ForEachMutableElement([&](const ShapeIndex& index,
XlaOp* element) {
const Shape& subshape = ShapeUtil::GetSubshape(shape, index);
if (subshape.IsArray()) {
*element = Add(
*element,
ConstantLiteral(&builder, LiteralUtil::CreateFullWithDescendingLayout(
subshape.dimensions(), addend)));
++addend;
}
});
AssembleTuple(&builder, std::move(disassembled_tuple));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> data,
client_->TransferToServer(input));
Literal expected = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({3}, int32_t{43}),
LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({4}, int32_t{45}),
LiteralUtil::CreateFullWithDescendingLayout({5}, int32_t{47})),
LiteralUtil::CreateFullWithDescendingLayout({6}, int32_t{49}));
ComputeAndCompareLiteral(&builder, expected, {data.get()}, ErrorSpec(0),
&shape);
}
}
} |
1,821 | cpp | tensorflow/tensorflow | value | third_party/xla/xla/python/ifrt/value.cc | tensorflow/cc/experimental/libtf/tests/value_test.cc | #ifndef XLA_PYTHON_IFRT_VALUE_H_
#define XLA_PYTHON_IFRT_VALUE_H_
#include <string>
#include "absl/status/status.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/future.h"
#include "xla/tsl/concurrency/ref_count.h"
namespace xla {
namespace ifrt {
class Client;
class Value : public tsl::ReferenceCounted<Value>,
public llvm::RTTIExtends<Value, llvm::RTTIRoot> {
public:
Value() = default;
Value(const Value&) = delete;
Value(Value&&) = delete;
Value& operator=(const Value&) = delete;
Value& operator=(Value&&) = delete;
virtual Client* client() const = 0;
virtual Future<> GetReadyFuture() const = 0;
virtual Future<> Delete() = 0;
virtual bool IsDeleted() const = 0;
virtual std::string DebugString() const = 0;
static char ID;
};
}
}
#endif
#include "xla/python/ifrt/value.h"
namespace xla {
namespace ifrt {
char Value::ID = 0;
}
} | #include "tensorflow/cc/experimental/libtf/value.h"
#include <cstdint>
#include "tensorflow/cc/experimental/libtf/value_iostream.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
namespace impl {
TEST(ValueTest, TestBasic) {
TaggedValue valuef(3.f);
TaggedValue valuei(int64_t(3));
TaggedValue list = TaggedValue::List();
TaggedValue tuple = TaggedValue::Tuple();
tuple.tuple().push_back(TaggedValue(int64_t(310)));
list.list().push_back(valuei);
list.list().push_back(valuef);
list.list().push_back(tuple);
std::stringstream stream;
stream << list;
ASSERT_EQ(stream.str(), "[3, 3, (310, ), ]");
}
TEST(ValueTest, TestString) {
TaggedValue value1a("string1");
std::string s = "string";
s += "1";
TaggedValue value1b(s.c_str());
ASSERT_EQ(value1b.s(), value1a.s());
TaggedValue value2("string2");
ASSERT_NE(value1a.s(), value2.s());
ASSERT_STREQ(value1a.s(), "string1");
ASSERT_STREQ(value2.s(), "string2");
}
TEST(Test1, TestDict) {
TaggedValue s1("test1");
TaggedValue s2("test2");
TaggedValue d = TaggedValue::Dict();
d.dict()[s2] = TaggedValue(6.f);
std::stringstream stream;
stream << d;
ASSERT_EQ(stream.str(), "{test2: 6, }");
}
namespace {
TaggedValue add(TaggedValue args, TaggedValue kwargs) {
if (args.type() == TaggedValue::TUPLE) {
return TaggedValue(args.tuple()[0].f32() + args.tuple()[1].f32());
}
return TaggedValue::None();
}
}
TEST(Test1, TestFunctionCall) {
TaggedValue f32 = TaggedValue(add);
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(1.f));
args.tuple().emplace_back(TaggedValue(2.f));
TaggedValue c = f32.func()(args, TaggedValue::None()).value();
ASSERT_EQ(c, TaggedValue(3.f));
}
namespace {
int alloc_count = 0;
class Cool {
public:
Cool() { alloc_count++; }
~Cool() { alloc_count--; }
};
}
TEST(Test1, TestCapsule) {
TaggedValue test_moved, test_copy;
ASSERT_EQ(alloc_count, 0);
void* ptr_value = new Cool();
{
TaggedValue capsule =
TaggedValue::Capsule(static_cast<void*>(ptr_value),
[](void* x) { delete static_cast<Cool*>(x); });
ASSERT_EQ(alloc_count, 1);
ASSERT_EQ(capsule.capsule(), ptr_value);
test_moved = std::move(capsule);
ASSERT_EQ(capsule.type(), TaggedValue::NONE);
test_copy = test_moved;
ASSERT_EQ(test_moved.capsule(), ptr_value);
ASSERT_EQ(test_copy.capsule(), ptr_value);
}
ASSERT_EQ(alloc_count, 1);
test_moved = TaggedValue::None();
ASSERT_EQ(alloc_count, 1);
test_copy = TaggedValue(3.f);
ASSERT_EQ(alloc_count, 0);
}
}
}
} |
1,822 | cpp | tensorflow/tensorflow | sharding_conversions | third_party/xla/xla/python/ifrt/support/sharding_conversions.cc | third_party/xla/xla/python/ifrt/support/sharding_conversions_test.cc | #ifndef XLA_PYTHON_IFRT_SUPPORT_SHARDING_CONVERSIONS_H_
#define XLA_PYTHON_IFRT_SUPPORT_SHARDING_CONVERSIONS_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace ifrt {
namespace support {
absl::StatusOr<OpSharding> ToOpSharding(const Sharding& sharding);
absl::StatusOr<OpSharding> ToOpSharding(
const ShardingParam& sharding_param,
const xla::ifrt::DeviceList& device_mapping);
absl::StatusOr<HloSharding> ToHloSharding(const ShardingParam& sharding_param);
absl::StatusOr<ShardingParam> ToShardingParam(const HloSharding& hlo_sharding,
int rank, int num_devices);
}
}
}
#endif
#include "xla/python/ifrt/support/sharding_conversions.h"
#include <cstdint>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace ifrt {
namespace support {
absl::StatusOr<OpSharding> ToOpSharding(const Sharding& sharding) {
if (auto* sharding_param_sharding =
llvm::dyn_cast<xla::ifrt::ShardingParamSharding>(&sharding)) {
return ToOpSharding(sharding_param_sharding->sharding_param(),
sharding_param_sharding->devices());
} else {
return absl::InvalidArgumentError(
"Only conversion from `ShardingParamSharding` to `OpSharding` is "
"supported.");
}
}
absl::StatusOr<OpSharding> ToOpSharding(
const ShardingParam& sharding_param,
const xla::ifrt::DeviceList& device_mapping) {
OpSharding op_sharding;
{
bool all_dim_replicated = true;
for (const int64_t dim_shard : sharding_param.dim_shards()) {
if (dim_shard != 1) {
all_dim_replicated = false;
break;
}
}
if (all_dim_replicated) {
op_sharding.set_type(OpSharding::REPLICATED);
return op_sharding;
}
}
op_sharding.set_type(OpSharding::OTHER);
auto* tile_assignment_dims = op_sharding.mutable_tile_assignment_dimensions();
int64_t cum_size = 1;
tile_assignment_dims->Reserve(sharding_param.dim_shards().size() + 1);
for (const int64_t dim_shard : sharding_param.dim_shards()) {
cum_size *= dim_shard;
tile_assignment_dims->Add(dim_shard);
}
int device_count = 1;
for (const int axis_size : sharding_param.minor_to_major().axis_sizes) {
device_count *= axis_size;
}
if (device_count != cum_size) {
op_sharding.set_replicate_on_last_tile_dim(true);
tile_assignment_dims->Add(device_count / cum_size);
}
llvm::SmallVector<int> logical_device_ids;
sharding_param.minor_to_major().ToDeviceList(logical_device_ids);
auto* tile_assignment_devices = op_sharding.mutable_tile_assignment_devices();
tile_assignment_devices->Reserve(logical_device_ids.size());
for (const int logical_device_id : logical_device_ids) {
if (logical_device_id < 0 || logical_device_id >= device_mapping.size()) {
return absl::OutOfRangeError(
absl::StrCat("Can't map device with logical id ", logical_device_id,
". The logical device id should be within [0, ",
device_mapping.size(), ")."));
}
tile_assignment_devices->Add(
device_mapping[logical_device_id]->Id().value());
}
return op_sharding;
}
absl::StatusOr<HloSharding> ToHloSharding(const ShardingParam& sharding_param) {
auto axis_sizes = sharding_param.minor_to_major().axis_sizes;
llvm::SmallVector<int64_t> reshape_dims;
reshape_dims.reserve(axis_sizes.size());
int device_count = 1;
for (auto axis_size : llvm::reverse(axis_sizes)) {
reshape_dims.push_back(axis_size);
device_count *= axis_size;
}
if (device_count == 1) {
return HloSharding::Replicate();
}
int64_t cum_size = 1;
llvm::SmallVector<int64_t> dims;
dims.reserve(sharding_param.dim_shards().size());
for (const int64_t dim_shard : sharding_param.dim_shards()) {
cum_size *= dim_shard;
dims.push_back(dim_shard);
}
llvm::SmallVector<int, 4> permutation;
int num_axis = sharding_param.minor_to_major().permutation.size();
permutation.reserve(num_axis);
for (const int axis_id :
llvm::reverse(sharding_param.minor_to_major().permutation)) {
permutation.push_back(num_axis - axis_id - 1);
}
if (device_count != cum_size) {
dims.push_back(device_count / cum_size);
return HloSharding::PartialTile(
TileAssignment(dims, reshape_dims, permutation));
} else {
return HloSharding::IotaTile(dims, reshape_dims, permutation);
}
}
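// Converts an HloSharding back into a ShardingParam of the given rank.
// Supports replicated shardings and tiled shardings backed by an iota tile
// assignment (optionally with a single REPLICATED subgroup); anything else is
// rejected with an error.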
absl::StatusOr<ShardingParam> ToShardingParam(const HloSharding& hlo_sharding,
int rank, int num_devices) {
ShardingParam::MinorToMajor minor_to_major;
if (hlo_sharding.IsReplicated() ||
(hlo_sharding.IsTileMaximal() && hlo_sharding.HasUniqueDevice() &&
num_devices == 1)) {
llvm::SmallVector<int64_t> dim_shards(rank, 1);
minor_to_major.permutation.push_back(0);
minor_to_major.axis_sizes.push_back(num_devices);
return ShardingParam(dim_shards, std::move(minor_to_major));
} else if (hlo_sharding.IsTiled()) {
const xla::TileAssignment& tile_assignment = hlo_sharding.tile_assignment();
if (!tile_assignment.iota()) {
return absl::InvalidArgumentError(absl::StrCat(
"Conversion from `HloSharding` without `IotaTileAssignment` is not "
"supported; sharding=",
hlo_sharding.ToString()));
}
if (rank != hlo_sharding.TiledDataRank()) {
      return absl::InvalidArgumentError(absl::StrFormat(
          "`TiledData` expected to have %d dimensions, but has %d "
          "dimensions; sharding=%s",
          rank, hlo_sharding.TiledDataRank(), hlo_sharding.ToString()));
}
if (hlo_sharding.subgroup_types().size() > 1 ||
(hlo_sharding.subgroup_types().size() == 1 &&
hlo_sharding.subgroup_types()[0] != xla::OpSharding::REPLICATED)) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Unsupported conversion to `ShardingParam` from `HloSharding` that "
          "has more than one subgroup or a subgroup that is not REPLICATED; "
          "sharding=",
          hlo_sharding.ToString()));
}
llvm::SmallVector<int64_t> dim_shards(tile_assignment.dimensions().begin(),
tile_assignment.dimensions().end());
if (hlo_sharding.ReplicateOnLastTileDim() ||
(hlo_sharding.subgroup_types().size() == 1 &&
hlo_sharding.subgroup_types()[0] == xla::OpSharding::REPLICATED)) {
dim_shards.pop_back();
}
if (tile_assignment.iota()->reshape_dims().empty()) {
minor_to_major.permutation.push_back(0);
minor_to_major.axis_sizes.push_back(num_devices);
} else {
for (auto reshape_dim :
llvm::reverse(tile_assignment.iota()->reshape_dims())) {
minor_to_major.axis_sizes.push_back(reshape_dim);
}
int num_axis = tile_assignment.iota()->transpose_perm().size();
for (int axis_id :
llvm::reverse(tile_assignment.iota()->transpose_perm())) {
minor_to_major.permutation.push_back(num_axis - axis_id - 1);
}
}
return ShardingParam(dim_shards, std::move(minor_to_major));
}
return absl::UnimplementedError(
absl::StrCat("Unsupported conversion to `ShardingParam` from "
"`HloSharding`; sharding=",
hlo_sharding.ToString()));
}
}
}
} | #include "xla/python/ifrt/support/sharding_conversions.h"
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/mock.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace support {
namespace {
using ::testing::Return;
using ::tsl::testing::StatusIs;
using xla::HloSharding;
absl::StatusOr<HloSharding> ToHloShardingViaOpSharding(
const ShardingParam& sharding_param, const DeviceList& device_list) {
TF_ASSIGN_OR_RETURN(xla::OpSharding op_sharding,
ToOpSharding(sharding_param, device_list));
return HloSharding::FromProto(op_sharding);
}
struct ShardingConversionTestClientState {
absl::flat_hash_map<DeviceId, std::unique_ptr<Device>> device_map;
std::vector<Device*> devices;
};
std::shared_ptr<MockClient> MakeTestClient(int num_devices) {
auto state = std::make_shared<ShardingConversionTestClientState>();
state->devices.reserve(num_devices);
for (int i = 0; i < num_devices; ++i) {
auto device = std::make_unique<MockDevice>();
ON_CALL(*device, Id).WillByDefault(Return(DeviceId(i)));
state->devices.push_back(device.get());
state->device_map.insert({DeviceId(i), std::move(device)});
}
auto client = std::make_shared<MockClient>();
ON_CALL(*client, devices)
.WillByDefault(
[state]() -> absl::Span<Device* const> { return state->devices; });
return client;
}
class ShardingConversionsTest : public testing::TestWithParam<int> {
public:
void SetUp() override { client_ = MakeTestClient(GetParam()); }
DeviceList GetDevices(absl::Span<const int> device_indices) {
return test_util::GetDevices(client_.get(), device_indices).value();
}
void AssertSameTiling(const ShardingParam& sharding_param,
const HloSharding& hlo_sharding, const Shape& shape) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<const Sharding> sharding,
ShardingParamSharding::Create(
sharding_param, device_list, MemoryKind()));
const xla::Shape xla_shape(PrimitiveType::F16, shape.dims(), {}, {});
TF_ASSERT_OK_AND_ASSIGN(const std::vector<IndexDomain> index_domains,
sharding->IndexDomains(shape));
ASSERT_EQ(index_domains.size(),
hlo_sharding.tile_assignment().num_elements());
const xla::Shape xla_tile_shape = hlo_sharding.TileShape(xla_shape);
for (int i = 0; i < index_domains.size(); ++i) {
SCOPED_TRACE(absl::StrCat("on device ", i));
EXPECT_EQ(index_domains[i].origin().elements(),
hlo_sharding.TileOffsetForDevice(xla_shape, i));
EXPECT_EQ(index_domains[i].shape().dims(), xla_tile_shape.dimensions());
}
}
private:
std::shared_ptr<Client> client_;
};
TEST_P(ShardingConversionsTest, Replicated) {
ShardingParam expected_sharding_param{
{1, 1, 1},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(), "{replicated}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 3, 6));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(hlo_iota_sharding, actual_hlo_sharding);
}
TEST_P(ShardingConversionsTest, SingleDeviceReplicated) {
ShardingParam expected_sharding_param{
{1, 1}, {{0}, {1}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param, GetDevices({0})));
EXPECT_EQ(hlo_sharding.ToString(), "{replicated}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 2, 1));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, Permutation) {
ShardingParam expected_sharding_param{
{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[2,1,3]0,3,1,4,2,5}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 3, 6));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, Partial) {
ShardingParam expected_sharding_param{
{2, 1}, {{0, 1}, {2, 3}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(),
"{devices=[2,1,3]0,1,2,3,4,5 last_tile_dim_replicate}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 2, 6));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(hlo_iota_sharding, actual_hlo_sharding);
}
TEST_P(ShardingConversionsTest, OneDimToTwoAxes) {
ShardingParam expected_sharding_param{
{4}, {{1, 0}, {2, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[4]0,2,1,3}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 1, 4));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, NonTrivialDeviceAssignment) {
ShardingParam expected_sharding_param{
{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({6, 5, 4, 3, 2, 1})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[2,1,3]6,3,5,2,4,1}");
}
TEST_P(ShardingConversionsTest, VerifyIncorrectShardings) {
ShardingParam different_permutation_and_axis{
{1, 1}, {{0, 1}, {2}}};
EXPECT_FALSE(different_permutation_and_axis.verify().ok());
ShardingParam too_many_slices{{2, 2},
{{0}, {2}}};
EXPECT_FALSE(too_many_slices.verify().ok());
ShardingParam incorrect_permutation{
{4, 1},
{{0, 1, 1}, {2, 2, 2}}};
EXPECT_FALSE(incorrect_permutation.verify().ok());
}
TEST_P(ShardingConversionsTest, ErrorOnDeviceAssignment) {
ShardingParam sharding_param{{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(sharding_param.verify());
EXPECT_THAT(
ToHloShardingViaOpSharding(sharding_param, GetDevices({6, 5, 4, 3, 2})),
StatusIs(absl::StatusCode::kOutOfRange,
::testing::HasSubstr("Can't map device with logical id 5")));
}
TEST_P(ShardingConversionsTest, ShardingParamFullySharded) {
ShardingParam sharding_param{{2, 3},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, ShardingParamWithPermutation) {
ShardingParam sharding_param{{2, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, ShardingParamWithReplication) {
ShardingParam sharding_param{{2, 1},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, OpShardingReplicated) {
OpSharding op_sharding;
op_sharding.set_type(OpSharding::REPLICATED);
TF_ASSERT_OK_AND_ASSIGN(auto hlo_sharding,
HloSharding::FromProto(op_sharding));
TF_ASSERT_OK_AND_ASSIGN(auto actual, ToShardingParam(hlo_sharding, 2, 6));
ShardingParam expected{{1, 1},
{{0}, {6}}};
TF_EXPECT_OK(expected.verify());
EXPECT_EQ(actual, expected);
}
INSTANTIATE_TEST_SUITE_P(NumDevices, ShardingConversionsTest,
testing::Values(7));
struct HloShardingTestStruct {
HloSharding hlo_sharding;
int rank;
int num_devices;
};
class HloShardingToShardingParamTest
: public testing::TestWithParam<HloShardingTestStruct> {
public:
void SetUp() override {
const auto& param = GetParam();
client_ = MakeTestClient(param.num_devices);
}
DeviceList GetDevices(absl::Span<const int> device_indices) {
return test_util::GetDevices(client_.get(), device_indices).value();
}
private:
std::shared_ptr<Client> client_;
};
TEST_P(HloShardingToShardingParamTest, HloShardingToShardingParam) {
const auto& param = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto sharding_param,
ToShardingParam(param.hlo_sharding, param.rank, param.num_devices));
EXPECT_TRUE(sharding_param.verify().ok());
TF_ASSERT_OK_AND_ASSIGN(auto actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(param.hlo_sharding, actual_hlo_sharding);
std::vector<int> device_ids(param.num_devices);
std::iota(device_ids.begin(), device_ids.end(), 0);
TF_ASSERT_OK_AND_ASSIGN(
auto hlo_via_op_sharding,
ToHloShardingViaOpSharding(sharding_param,
GetDevices(absl::MakeSpan(device_ids))));
EXPECT_EQ(param.hlo_sharding, hlo_via_op_sharding);
}
INSTANTIATE_TEST_SUITE_P(
HloShardingConversionTests, HloShardingToShardingParamTest,
testing::ValuesIn<HloShardingTestStruct>({
{HloSharding::IotaTile({4, 2}), 2, 8},
{HloSharding::IotaTile({2, 4}, {4, 2}, {1, 0}), 2, 8},
{HloSharding::IotaTile({8, 1}), 2, 8},
{HloSharding::IotaTile({8, 1}, {4, 2}, {1, 0}), 2, 8},
{HloSharding::PartialTile(TileAssignment({4, 1, 2}, {8}, {0})), 2, 8},
{HloSharding::PartialTile(TileAssignment({2, 1, 4}, {4, 2}, {1, 0})), 2,
8},
{HloSharding::PartialTile(TileAssignment({1, 4, 2}, {8}, {0})), 2, 8},
{HloSharding::PartialTile(TileAssignment({1, 2, 4}, {4, 2}, {1, 0})), 2,
8},
{HloSharding::PartialTile(TileAssignment({4, 3, 2}, {2, 3, 4},
{2, 1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({4, 2, 3}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({6, 1, 4}, {24}, {0})), 2, 24},
{HloSharding::PartialTile(TileAssignment({12, 1, 2}, {2, 12}, {1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({8, 1, 3}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({2, 1, 12}, {24}, {0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({3, 1, 8}, {2, 3, 4},
{1, 0, 2})),
2, 24},
{HloSharding::PartialTile(TileAssignment({1, 4, 6}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({1, 12, 2}, {2, 12}, {1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({3, 2, 1, 4}, {2, 3, 4},
{1, 0, 2})),
3, 24},
{HloSharding::PartialTile(TileAssignment({2, 4, 1, 3}, {2, 3, 4},
{0, 2, 1})),
3, 24},
{HloSharding::PartialTile(TileAssignment({4, 3, 1, 2}, {2, 3, 4},
{2, 1, 0})),
3, 24},
{HloSharding::PartialTile(TileAssignment({12, 1, 1, 2}, {2, 12},
{1, 0})),
3, 24},
}));
}
}
}
} |
1,823 | cpp | tensorflow/tensorflow | type_to_shape | third_party/xla/xla/translate/mhlo_to_hlo/type_to_shape.cc | third_party/xla/xla/translate/mhlo_to_hlo/type_to_shape_test.cc | #ifndef XLA_TRANSLATE_MHLO_TO_HLO_TYPE_TO_SHAPE_H_
#define XLA_TRANSLATE_MHLO_TO_HLO_TYPE_TO_SHAPE_H_
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Types.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace xla {
Shape TypeToShape(mlir::Type type);
}
#endif
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/SparseTensor/IR/Enums.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
using ::int64_t;
using mlir::MemRefType;
using mlir::RankedTensorType;
using mlir::ShapedType;
using mlir::VectorType;
using mlir::mhlo::TypeExtensionsAttr;
using xla::PrimitiveType;
namespace xla {
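// Translates a sparse-tensor level type into the corresponding XLA
// DimLevelType along with its uniqueness and ordering flags; returns
// std::nullopt for unsupported level formats.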
std::optional<std::tuple<DimLevelType, bool, bool>> ConvertDimLevelType(
mlir::sparse_tensor::LevelType lt) {
auto f = mlir::sparse_tensor::getLevelFormat(lt);
if (!f) return std::nullopt;
bool unique = mlir::sparse_tensor::isUniqueLT(lt);
bool ordered = mlir::sparse_tensor::isOrderedLT(lt);
switch (*f) {
case mlir::sparse_tensor::LevelFormat::Singleton:
return std::make_tuple(DimLevelType::DIM_SINGLETON, unique, ordered);
case mlir::sparse_tensor::LevelFormat::Compressed:
return std::make_tuple(DimLevelType::DIM_COMPRESSED, unique, ordered);
case mlir::sparse_tensor::LevelFormat::Dense:
return std::make_tuple(DimLevelType::DIM_DENSE, unique, ordered);
case mlir::sparse_tensor::LevelFormat::LooseCompressed:
return std::make_tuple(DimLevelType::DIM_LOOSE_COMPRESSED, unique,
ordered);
default:
return std::nullopt;
}
}
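// Maps an MLIR type to an XLA Shape. Handles scalar primitive types, vectors,
// (possibly strided) memrefs, ranked tensors (including bounded/unbounded
// dynamic dimensions and sparse encodings), tuples, tokens, and async
// bundles; returns an empty Shape for anything it cannot convert.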
Shape TypeToShape(mlir::Type type) {
PrimitiveType ptype = ConvertMlirTypeToPrimitiveType(type);
if (ptype != PrimitiveType::PRIMITIVE_TYPE_INVALID)
return ShapeUtil::MakeShape(ptype, {});
if (type.isIntOrFloat()) {
auto* context = type.getContext();
mlir::emitError(mlir::UnknownLoc::get(context))
<< "lowering should have been handled by primitive type lowering for "
<< debugString(type);
} else if (auto v = mlir::dyn_cast<mlir::VectorType>(type)) {
llvm::SmallVector<int64_t, 4> span(v.getShape().begin(),
v.getShape().end());
mlir::Type element_type = v.getElementType();
PrimitiveType primitive_type = ConvertMlirTypeToPrimitiveType(element_type);
if (primitive_type != PrimitiveType::PRIMITIVE_TYPE_INVALID)
return ShapeUtil::MakeShape(primitive_type, span);
} else if (auto m = mlir::dyn_cast<mlir::MemRefType>(type)) {
llvm::SmallVector<int64_t, 6> span(m.getShape().begin(),
m.getShape().end());
mlir::Type element_type = m.getElementType();
if (auto v = mlir::dyn_cast<mlir::VectorType>(element_type)) {
element_type = v.getElementType();
span.insert(span.end(), v.getShape().begin(), v.getShape().end());
}
PrimitiveType primitive_type = ConvertMlirTypeToPrimitiveType(element_type);
if (primitive_type == PrimitiveType::PRIMITIVE_TYPE_INVALID) return {};
if (m.getLayout().isIdentity())
return ShapeUtil::MakeShape(primitive_type, span);
llvm::SmallVector<int64_t, 4> strides;
int64_t offset;
if (failed(mlir::getStridesAndOffset(m, strides, offset))) return {};
llvm::SmallVector<std::pair<int64_t, int>, 4> strides_with_indices;
for (const auto& e : llvm::enumerate(strides)) {
strides_with_indices.push_back({e.value(), e.index()});
}
std::stable_sort(strides_with_indices.begin(), strides_with_indices.end());
llvm::SmallVector<int64_t, 4> minor_to_major;
int64_t stride = 1;
for (const auto& pr : strides_with_indices) {
minor_to_major.push_back(pr.second);
if (stride != pr.first && m.getShape()[pr.second] != 1) return {};
stride *= m.getShape()[pr.second];
}
llvm::SmallVector<int64_t, 4> dimensions(m.getShape().begin(),
m.getShape().end());
return ::xla::ShapeUtil::MakeShapeWithDenseLayout(
primitive_type, dimensions, minor_to_major);
} else if (auto t = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
int64_t rank = t.getRank();
llvm::SmallVector<int64_t, 4> bounds;
if (auto extn =
mlir::dyn_cast_or_null<TypeExtensionsAttr>(t.getEncoding())) {
bounds = llvm::to_vector<4>(extn.getBounds());
} else {
bounds.assign(rank, ShapedType::kDynamic);
}
llvm::SmallVector<int64_t, 4> shape(rank, mlir::ShapedType::kDynamic);
std::vector<bool> is_dynamic(rank, false);
for (int64_t dim = 0; dim < rank; ++dim) {
int64_t size = t.getDimSize(dim);
if (size == ShapedType::kDynamic) {
shape[dim] = bounds[dim] != ShapedType::kDynamic
? bounds[dim]
: Shape::kUnboundedSize;
is_dynamic[dim] = true;
} else {
if (bounds[dim] != ShapedType::kDynamic) return {};
shape[dim] = size;
}
}
PrimitiveType primitive_type =
ConvertMlirTypeToPrimitiveType(t.getElementType());
if (primitive_type == PrimitiveType::PRIMITIVE_TYPE_INVALID) return {};
if (auto sparse = mlir::sparse_tensor::getSparseTensorEncoding(type)) {
if (!t.hasStaticShape()) return {};
if (sparse.getPosWidth() != 32 || sparse.getCrdWidth() != 32) return {};
llvm::SmallVector<DimLevelType, 3> lvl_types;
llvm::SmallVector<bool, 3> level_unique;
llvm::SmallVector<bool, 3> level_ordered;
for (auto lt : sparse.getLvlTypes()) {
auto new_lt = ConvertDimLevelType(lt);
if (!new_lt) return {};
lvl_types.push_back(std::get<0>(*new_lt));
level_unique.push_back(std::get<1>(*new_lt));
level_ordered.push_back(std::get<2>(*new_lt));
}
std::vector<int64_t> ordering(rank);
std::iota(ordering.rbegin(), ordering.rend(), 0);
auto dimToLvl = sparse.getDimToLvl()
? sparse.getDimToLvl()
: mlir::AffineMap::getMultiDimIdentityMap(
rank, sparse.getContext());
auto final_ordering = mlir::applyPermutationMap(
dimToLvl, llvm::ArrayRef<int64_t>(ordering));
auto sparse_shape = ::xla::ShapeUtil::MakeShapeWithSparseLayout(
primitive_type, shape, final_ordering, lvl_types, level_unique,
level_ordered);
return sparse_shape;
}
return ShapeUtil::MakeShape(primitive_type, shape, is_dynamic);
} else if (auto tuple_type = mlir::dyn_cast<mlir::TupleType>(type)) {
llvm::SmallVector<Shape, 4> shapes;
shapes.reserve(tuple_type.size());
for (mlir::Type sub_type : tuple_type.getTypes()) {
shapes.push_back(TypeToShape(sub_type));
}
return ShapeUtil::MakeTupleShape(shapes);
} else if (mlir::isa<mlir::mhlo::TokenType>(type) ||
mlir::isa<mlir::stablehlo::TokenType>(type)) {
return ShapeUtil::MakeTokenShape();
} else if (auto bundle_type =
mlir::dyn_cast<mlir::mhlo::AsyncBundleType>(type)) {
auto tuple_type =
mlir::TupleType::get(type.getContext(), bundle_type.getTypes());
return TypeToShape(tuple_type);
}
return {};
}
} | #include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include <iostream>
#include <utility>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/translate/hlo_to_mhlo/hlo_utils.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
using mlir::Builder;
using mlir::MemRefType;
using mlir::MLIRContext;
using mlir::RankedTensorType;
using mlir::UnrankedTensorType;
using mlir::VectorType;
namespace xla {
namespace {
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
TEST(TypeToShapeTest, ConvertBasicTypesToTypes) {
MLIRContext context;
Builder b(&context);
EXPECT_TRUE(
ShapeUtil::IsScalarWithElementType(TypeToShape(b.getF32Type()), F32));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getIntegerType(32))).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getIntegerType(17))).ToProto(),
EqualsProto(Shape().ToProto()));
}
TEST(TypeToShapeTest, ConvertMemRefTypeToTypes) {
MLIRContext context;
Builder b(&context);
EXPECT_THAT(
TypeToShape(MemRefType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(MemRefType::get({100, 13, 210}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {100, 13, 210}).ToProto()));
EXPECT_THAT(
TypeToShape(MemRefType::get({100, 13, 210},
VectorType::get({8, 128}, b.getF32Type())))
.ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {100, 13, 210, 8, 128})
.ToProto()));
}
TEST(TypeToShapeTest, ConvertTensorTypeToTypes) {
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect>();
Builder b(&context);
EXPECT_THAT(
TypeToShape(RankedTensorType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
llvm::SmallVector<int64_t, 4> bounds = {8, mlir::ShapedType::kDynamic};
auto extensions = mlir::mhlo::TypeExtensionsAttr::get(&context, bounds);
EXPECT_THAT(
TypeToShape(RankedTensorType::get({mlir::ShapedType::kDynamic, 128},
b.getF32Type(), extensions))
.ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}, {true, false})
.ToProto()));
EXPECT_THAT(
TypeToShape(RankedTensorType::get({mlir::ShapedType::kDynamic, 784},
b.getF32Type()))
.ToProto(),
EqualsProto(ShapeUtil::MakeShape(PrimitiveType::F32,
{Shape::kUnboundedSize, 784},
{true, false})
.ToProto()));
EXPECT_THAT(TypeToShape(UnrankedTensorType::get(b.getF32Type())).ToProto(),
EqualsProto(Shape().ToProto()));
EXPECT_THAT(
TypeToShape(RankedTensorType::get(
{8, 128}, VectorType::get({16, 16}, b.getF32Type())))
.ToProto(),
EqualsProto(Shape().ToProto()));
}
TEST(TypeToShapeTest, ConvertMemRefToShape) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::F32,
{10, 20, 30}, {2, 0, 1});
MLIRContext context;
mlir::Builder builder(&context);
absl::StatusOr<mlir::Type> mlir_type =
ConvertShapeToType<MemRefType>(shape, builder);
ASSERT_TRUE(mlir_type.ok());
mlir::Type type = std::move(mlir_type).value();
Shape converted = TypeToShape(type);
EXPECT_TRUE(ShapeUtil::Equal(
converted, ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::F32,
{10, 20, 30}, {2, 0, 1})));
EXPECT_TRUE(ShapeUtil::Equal(converted, shape));
}
TEST(TypeToShapeTest, ConvertMemRefToShape2) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::C64,
{2, 4, 3, 3}, {2, 3, 1, 0});
MLIRContext context;
mlir::Builder builder(&context);
absl::StatusOr<mlir::Type> mlir_type =
ConvertShapeToType<MemRefType>(shape, builder);
ASSERT_TRUE(mlir_type.ok());
mlir::Type type = std::move(mlir_type).value();
Shape converted = TypeToShape(type);
EXPECT_TRUE(ShapeUtil::Equal(
converted, ShapeUtil::MakeShapeWithDenseLayout(
PrimitiveType::C64, {2, 4, 3, 3}, {2, 3, 1, 0})));
EXPECT_TRUE(ShapeUtil::Equal(converted, shape));
}
}
} |
1,824 | cpp | tensorflow/tensorflow | hlo_utils | third_party/xla/xla/mlir_hlo/utils/hlo_utils.cc | third_party/xla/xla/translate/hlo_to_mhlo/hlo_utils_test.cc | #ifndef MLIR_HLO_UTILS_HLO_UTILS_H
#define MLIR_HLO_UTILS_HLO_UTILS_H
#include <complex>
#include <string>
#include <utility>
#include <vector>
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/ChloOps.h"
namespace mlir {
namespace hlo {
mlir::DenseI64ArrayAttr getBroadcastDimensionsAttr(mlir::Builder* b,
mlir::Value x, mlir::Value y,
bool allowEmpty = true);
template <typename T>
static ElementsAttr getSplat(Builder* b, RankedTensorType ty, T constant) {
Type elementTy = getElementTypeOrSelf(ty);
if (elementTy.isSignlessInteger())
return DenseElementsAttr::get(ty, b->getIntegerAttr(elementTy, constant));
if (mlir::isa<FloatType>(elementTy))
return DenseElementsAttr::get(ty, b->getFloatAttr(elementTy, constant));
if (auto complexTy = mlir::dyn_cast<ComplexType>(elementTy)) {
auto complexElementTy = complexTy.getElementType();
if (complexElementTy.isF32())
return DenseElementsAttr::get(ty,
static_cast<std::complex<float>>(constant));
if (complexElementTy.isF64())
return DenseElementsAttr::get(
ty, static_cast<std::complex<double>>(constant));
}
llvm_unreachable("unhandled element type");
}
template <typename T>
static ElementsAttr getSplat(Builder* b, Value val, T constant) {
return getSplat(b, mlir::cast<RankedTensorType>(val.getType()), constant);
}
DenseElementsAttr getScalarOfType(Type ty, int64_t rawValue);
DenseElementsAttr getScalarNegZeroOfType(Type ty);
enum ScalarLimit {
kLowest,
kInfinityLowest,
kMax,
kInfinityMax,
};
DenseElementsAttr getScalarLimitOfType(Type ty, ScalarLimit limit);
std::string lmhloToMhloOpName(llvm::StringRef opName,
mlir::MLIRContext* context);
bool isSequenceStartingWith0(Attribute attr);
int64_t getArgumentIndex(func::FuncOp op, Value value);
std::pair<size_t, size_t> computeMemory(const std::vector<Value>& allocs);
}
}
namespace mlir {
namespace chlo {
template <typename T>
static Value getConstantLike(OpBuilder& b, Location loc, T constant,
Value val) {
Type ty = getElementTypeOrSelf(val.getType());
auto getAttr = [&]() -> Attribute {
if (mlir::isa<IntegerType>(ty)) return b.getIntegerAttr(ty, constant);
if (mlir::isa<FloatType>(ty)) return b.getFloatAttr(ty, constant);
if (auto complexTy = mlir::dyn_cast<ComplexType>(ty))
return complex::NumberAttr::get(complexTy, constant, 0);
llvm_unreachable("unhandled element type");
};
return b.create<ConstantLikeOp>(loc, cast<TypedAttr>(getAttr()), val);
}
Value getConstantLike(OpBuilder& b, Location loc, const APFloat& constant,
Value val);
Value getConstantLikeMaxFiniteValue(OpBuilder& b, Location loc, Value val);
Value getConstantLikeInfValue(OpBuilder& b, Location loc, Value val,
bool negative);
Value getConstantLikeSmallestFiniteValue(OpBuilder& b, Location loc, Value val);
}
}
#endif
#include "utils/hlo_utils.h"
#include <algorithm>
#include <cassert>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
namespace mlir {
namespace hlo {
static constexpr size_t kPaddingSize = 64;
DenseI64ArrayAttr getBroadcastDimensionsAttr(Builder* b, Value x, Value y,
bool allowEmpty) {
TensorType xType = mlir::dyn_cast<RankedTensorType>(x.getType());
TensorType yType = mlir::dyn_cast<RankedTensorType>(y.getType());
if (!xType || !yType) return {};
if (allowEmpty && xType == yType) return {};
auto xRank = xType.getRank(), yRank = yType.getRank();
if (allowEmpty && xRank == yRank) return {};
auto maxRank = std::max(xRank, yRank);
auto minRank = std::min(xRank, yRank);
SmallVector<int64_t, 4> broadcastDimensions(minRank);
std::iota(broadcastDimensions.begin(), broadcastDimensions.end(),
maxRank - minRank);
return b->getDenseI64ArrayAttr(broadcastDimensions);
}
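// Builds a rank-0 DenseElementsAttr of element type `ty` holding `rawValue`,
// supporting float, integer, and complex element types.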
DenseElementsAttr getScalarOfType(Type ty, int64_t rawValue) {
RankedTensorType scalarTy = RankedTensorType::get({}, ty);
if (auto floatTy = mlir::dyn_cast<FloatType>(ty)) {
APFloat value(floatTy.getFloatSemantics(), rawValue);
return DenseElementsAttr::get(scalarTy, value);
}
if (auto intTy = mlir::dyn_cast<IntegerType>(ty)) {
    APInt value(intTy.getWidth(), static_cast<int64_t>(rawValue),
                /*isSigned=*/true);
return DenseElementsAttr::get(scalarTy, value);
}
if (auto complexTy = mlir::dyn_cast<ComplexType>(ty)) {
if (auto floatTy = mlir::cast<FloatType>(complexTy.getElementType())) {
APFloat real(floatTy.getFloatSemantics(), rawValue);
APFloat imag = APFloat::getZero(floatTy.getFloatSemantics());
return DenseElementsAttr::get(scalarTy,
std::complex<APFloat>(real, imag));
}
}
llvm_unreachable("unsupported type");
}
DenseElementsAttr getScalarNegZeroOfType(Type ty) {
RankedTensorType scalarTy = RankedTensorType::get({}, ty);
if (auto floatTy = mlir::dyn_cast<FloatType>(ty)) {
    APFloat negZero =
        APFloat::getZero(floatTy.getFloatSemantics(), /*Negative=*/true);
return DenseElementsAttr::get(scalarTy, negZero);
}
if (auto intTy = mlir::dyn_cast<IntegerType>(ty)) {
return DenseElementsAttr::get(scalarTy, APInt::getZero(intTy.getWidth()));
}
if (auto complexTy = mlir::dyn_cast<ComplexType>(ty)) {
if (auto floatTy = mlir::cast<FloatType>(complexTy.getElementType())) {
      APFloat negZero =
          APFloat::getZero(floatTy.getFloatSemantics(), /*Negative=*/true);
return DenseElementsAttr::get(scalarTy,
std::complex<APFloat>(negZero, negZero));
}
}
llvm_unreachable("unsupported type");
}
static APFloat getScalarLimitOfFloatType(FloatType floatTy, ScalarLimit limit) {
auto& semantics = floatTy.getFloatSemantics();
switch (limit) {
case kLowest:
return APFloat::getLargest(semantics, true);
case kInfinityLowest:
return APFloat::getInf(semantics, true);
case kMax:
return APFloat::getLargest(semantics, false);
case kInfinityMax:
return APFloat::getInf(semantics, false);
}
llvm_unreachable("invalid limit");
}
static APInt getScalarLimitOfIntegerType(IntegerType integerTy,
ScalarLimit limit) {
unsigned width = integerTy.getWidth();
bool isBool = (width == 1);
switch (limit) {
case kLowest:
case kInfinityLowest:
if (integerTy.isUnsigned() || isBool) {
return APInt::getMinValue(width);
} else {
return APInt::getSignedMinValue(width);
}
case kMax:
case kInfinityMax:
if (integerTy.isUnsigned() || isBool) {
return APInt::getMaxValue(width);
} else {
return APInt::getSignedMaxValue(width);
}
}
llvm_unreachable("invalid limit");
}
DenseElementsAttr getScalarLimitOfType(Type ty, ScalarLimit limit) {
RankedTensorType scalarTy = RankedTensorType::get({}, ty);
if (auto floatTy = mlir::dyn_cast<FloatType>(ty)) {
return DenseElementsAttr::get(scalarTy,
getScalarLimitOfFloatType(floatTy, limit));
}
if (auto integerTy = mlir::dyn_cast<IntegerType>(ty)) {
return DenseElementsAttr::get(
scalarTy, getScalarLimitOfIntegerType(integerTy, limit));
}
llvm_unreachable("unsupported type");
}
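// Maps an LMHLO op name to its MHLO counterpart (with special cases for dot
// and dynamic_slice); returns the empty string when no registered MHLO op
// matches.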
std::string lmhloToMhloOpName(llvm::StringRef opName,
mlir::MLIRContext* context) {
assert(opName.starts_with("lmhlo.") && "Expected an LMHLO op");
if (opName == "lmhlo.dot") {
return "mhlo.dot_general";
}
if (opName == "lmhlo.dynamic_slice") {
return "mhlo.dynamic_slice";
}
std::string mhloOpName(opName.drop_front(1));
if (context->isOperationRegistered(mhloOpName)) return mhloOpName;
return "";
}
bool isSequenceStartingWith0(Attribute attr) {
DenseIntElementsAttr denseAttr = mlir::dyn_cast<DenseIntElementsAttr>(attr);
for (int64_t i = 0, e = denseAttr.getNumElements(); i < e; ++i)
if (denseAttr.getValues<APInt>()[i].getSExtValue() != i) return false;
return true;
}
int64_t getArgumentIndex(mlir::func::FuncOp op, Value value) {
BlockArgument arg = mlir::dyn_cast<BlockArgument>(value);
if (!arg || arg.getOwner() != &op.front()) return -1;
return arg.getArgNumber();
}
std::pair<size_t, size_t> computeMemory(const std::vector<Value>& allocs) {
size_t totalSize = 0;
size_t allocCounter = 0;
for (const Value alloc : allocs) {
auto shape = mlir::cast<ShapedType>(alloc.getType());
size_t shapeBytes = llvm::divideCeil(
shape.getNumElements() * shape.getElementTypeBitWidth(), 8);
size_t alignFactor = llvm::divideCeil(shapeBytes, kPaddingSize);
size_t size = alignFactor * kPaddingSize;
totalSize += size;
allocCounter++;
}
return std::make_pair(totalSize, allocCounter);
}
}
}
namespace mlir {
namespace chlo {
Value getConstantLikeMaxFiniteValue(OpBuilder& b, Location loc, Value val) {
auto ty = mlir::cast<FloatType>(getElementTypeOrSelf(val.getType()));
return getConstantLike(
b, loc, llvm::APFloat::getLargest(ty.getFloatSemantics()), val);
}
Value getConstantLikeInfValue(OpBuilder& b, Location loc, Value val,
bool negative) {
auto ty = mlir::cast<FloatType>(getElementTypeOrSelf(val.getType()));
return getConstantLike(
b, loc, llvm::APFloat::getInf(ty.getFloatSemantics(), negative), val);
}
Value getConstantLikeSmallestFiniteValue(OpBuilder& b, Location loc,
Value val) {
auto ty = mlir::cast<FloatType>(getElementTypeOrSelf(val.getType()));
return getConstantLike(
b, loc, llvm::APFloat::getSmallest(ty.getFloatSemantics()), val);
}
Value getConstantLike(OpBuilder& b, Location loc, const APFloat& constant,
Value val) {
Type ty = getElementTypeOrSelf(val.getType());
return b.create<ConstantLikeOp>(loc, b.getFloatAttr(ty, constant), val);
}
}
} | #include "xla/translate/hlo_to_mhlo/hlo_utils.h"
#include <cstdint>
#include <cstring>
#include <vector>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/DebugStringHelper.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
TEST(ConvertTensorShapeToType, Simple) {
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect>();
mlir::Builder builder(&context);
{
auto shape = ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128});
TF_ASSERT_OK_AND_ASSIGN(
auto type,
ConvertTensorShapeToType<mlir::RankedTensorType>(shape, builder));
auto expected = mlir::RankedTensorType::get({8, 128}, builder.getI32Type());
EXPECT_TRUE(type == expected)
<< " Expected: " << mlir::debugString(expected)
<< " Computed: " << mlir::debugString(type);
}
{
auto shape =
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128}, {true, false});
TF_ASSERT_OK_AND_ASSIGN(
auto type,
ConvertTensorShapeToType<mlir::RankedTensorType>(shape, builder));
int64_t bounds[] = {8, mlir::ShapedType::kDynamic};
auto extensions = mlir::mhlo::TypeExtensionsAttr::get(&context, bounds);
auto expected = mlir::RankedTensorType::get(
{mlir::ShapedType::kDynamic, 128}, builder.getI32Type(), extensions);
EXPECT_TRUE(type == expected)
<< " Expected: " << mlir::debugString(expected)
<< " Computed: " << mlir::debugString(type);
}
}
}
} |
1,825 | cpp | tensorflow/tensorflow | dnn | third_party/xla/xla/stream_executor/dnn.cc | third_party/xla/xla/stream_executor/dnn_test.cc | #ifndef XLA_STREAM_EXECUTOR_DNN_H_
#define XLA_STREAM_EXECUTOR_DNN_H_
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/stream_executor/data_type.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/numeric_options.h"
#include "tsl/platform/logging.h"
#include "tsl/protobuf/dnn.pb.h"
namespace Eigen {
struct half;
}
namespace stream_executor {
class HostBuffer;
class Stream;
class ScratchAllocator;
namespace dnn {
enum class DimIndex : int {
X = 0,
Y = 1,
Z = 2,
};
std::vector<int64_t> ReorderDims(const std::vector<int64_t>& input,
const DataLayout& from, const DataLayout& to);
inline int64_t GetDim(absl::Span<const int64_t> data, DimIndex dim) {
return data.rbegin()[static_cast<int64_t>(dim)];
}
inline void SetDim(absl::Span<int64_t> data, DimIndex dim, int64_t value) {
data.rbegin()[static_cast<int64_t>(dim)] = value;
}
inline void SetDim(std::vector<int64_t>* data, DimIndex dim, int64_t value) {
return SetDim(absl::MakeSpan(*data), dim, value);
}
template <typename T>
inline absl::Span<const int64_t> AsInt64Slice(const T& repeated_field) {
using data_ty =
typename std::remove_reference<decltype(*repeated_field.data())>::type;
static_assert(std::is_integral<data_ty>::value &&
std::is_signed<data_ty>::value && sizeof(data_ty) == 8,
"repeated_field.data() must return a pointer to a signed "
"64-bit integer type.");
return absl::Span<const int64_t>(
reinterpret_cast<const int64_t*>(repeated_field.data()),
repeated_field.size());
}
template <typename T>
inline absl::Span<int64_t> AsInt64Slice(T* repeated_field) {
using data_ty =
typename std::remove_reference<decltype(*repeated_field->data())>::type;
static_assert(std::is_integral<data_ty>::value &&
std::is_signed<data_ty>::value && sizeof(data_ty) == 8,
"repeated_field->data() must return a pointer to a signed "
"64-bit integer type.");
return absl::Span<int64_t>(
reinterpret_cast<int64_t*>(repeated_field->mutable_data()),
repeated_field->size());
}
std::string DataLayoutString(DataLayout layout);
enum class QuantizedActivationMode {
k8Bit = 1,
k16Bit = 2,
k32Bit = 4,
};
enum class RnnMode {
kRnnRelu = 0,
kRnnTanh = 1,
kRnnLstm = 2,
kRnnGru = 3,
};
enum class RnnInputMode {
kRnnLinearSkip = 0,
kRnnSkipInput = 1,
};
enum class RnnDirectionMode {
kRnnUnidirectional = 0,
kRnnBidirectional = 1,
};
class TensorDescriptor {
public:
TensorDescriptor() = default;
absl::StatusOr<std::vector<int64_t>> GetPhysicalDimensionsMajorToMinor()
const;
std::vector<int64_t> GetPhysicalStridesMajorToMinor() const;
std::vector<int64_t> GetLogicalStrides() const;
static TensorDescriptor For(DataType type,
absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> minor_to_major);
int ndims() const;
std::vector<int64_t> dimensions() const { return dimensions_; }
std::vector<int64_t> minor_to_major() const { return minor_to_major_; }
DataType type() const { return d_type_; }
std::string ToString() const;
protected:
TensorDescriptor(DataType type, std::vector<int64_t> dimensions,
std::vector<int64_t> minor_to_major)
: d_type_(type),
dimensions_(dimensions),
minor_to_major_(minor_to_major) {}
private:
DataType d_type_;
std::vector<int64_t> dimensions_;
std::vector<int64_t> minor_to_major_;
};
class MatmulTensorDescriptor {
public:
MatmulTensorDescriptor() = default;
absl::StatusOr<std::vector<int64_t>> GetNonContractingDims() const;
std::vector<int64_t> GetCudnnCompatibleDimensions(
bool is_lhs
) const;
std::vector<int64_t> GetCudnnCompatibleStrides(
bool is_lhs
) const;
absl::StatusOr<std::vector<int64_t>> MakeCudnnCompatible(
const std::vector<int64_t>&, bool is_lhs) const;
static MatmulTensorDescriptor For(DataType type,
absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> minor_to_major,
absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> contracting_dims);
std::vector<int64_t> dimensions() const { return tensor_.dimensions(); }
std::vector<int64_t> minor_to_major() const {
return tensor_.minor_to_major();
}
DataType type() const { return tensor_.type(); }
std::string ToString() const;
protected:
MatmulTensorDescriptor(TensorDescriptor tensor,
std::vector<int64_t> batch_dims,
std::vector<int64_t> contracting_dims)
: tensor_(tensor),
batch_dimension_numbers_(batch_dims),
contracting_dim_(contracting_dims) {}
private:
TensorDescriptor tensor_;
std::vector<int64_t> batch_dimension_numbers_;
std::vector<int64_t> contracting_dim_;
};
class RnnDescriptor {
public:
struct ParamsRegion {
int64_t offset;
int64_t size;
};
typedef std::vector<ParamsRegion> ParamsRegions;
virtual ~RnnDescriptor() = default;
virtual int64_t ParamsSizeInBytes() const { return -1; }
virtual ParamsRegions ParamsWeightRegions() const { return ParamsRegions(); }
virtual ParamsRegions ParamsBiasRegions() const { return ParamsRegions(); }
};
class RnnSequenceTensorDescriptor {
public:
virtual ~RnnSequenceTensorDescriptor() = default;
};
class RnnStateTensorDescriptor {
public:
virtual ~RnnStateTensorDescriptor() = default;
};
std::string QuantizedActivationModeString(QuantizedActivationMode mode);
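// Describes a batch of activations: its count, feature map count, spatial
// dimensions, value range, data layout, and quantization mode.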
class BatchDescriptor {
public:
BatchDescriptor();
explicit BatchDescriptor(int ndims);
void CloneFrom(const BatchDescriptor& other);
std::string ToString() const;
std::string ToShortString() const;
TensorDescriptorProto ToProto(DataType data_type) const;
int64_t count() const { return tensor_.dimensions(0); }
int64_t feature_map_count() const { return tensor_.dimensions(1); }
int64_t height() const { return GetDim(spatial_size(), DimIndex::Y); }
int64_t width() const { return GetDim(spatial_size(), DimIndex::X); }
int64_t spatial_dim(DimIndex dim) const {
return GetDim(spatial_size(), dim);
}
int ndims() const { return spatial_size().size(); }
float value_max() const { return value_max_; }
float value_min() const { return value_min_; }
DataLayout layout() const { return tensor_.data_layout(); }
QuantizedActivationMode quantized_activation_mode() const {
return quantized_activation_mode_;
}
std::vector<int64_t> full_dims(const DataLayout& layout) const;
std::vector<int64_t> full_strides(const DataLayout& layout) const;
std::vector<int64_t> vectorized_dims(const DataLayout& layout,
int vector_size, int vector_dim) const;
std::vector<int64_t> vectorized_strides(const DataLayout& layout,
int vector_size,
int vector_dim) const;
BatchDescriptor& set_count(int64_t value) {
tensor_.set_dimensions(0, value);
return *this;
}
BatchDescriptor& set_feature_map_count(int64_t value) {
tensor_.set_dimensions(1, value);
return *this;
}
BatchDescriptor& set_height(int64_t value) {
SetDim(spatial_size(), DimIndex::Y, value);
return *this;
}
BatchDescriptor& set_width(int64_t value) {
SetDim(spatial_size(), DimIndex::X, value);
return *this;
}
BatchDescriptor& set_spatial_dim(DimIndex dim, int64_t value) {
SetDim(spatial_size(), dim, value);
return *this;
}
BatchDescriptor& set_value_max(float value) {
value_max_ = value;
return *this;
}
BatchDescriptor& set_value_min(float value) {
value_min_ = value;
return *this;
}
BatchDescriptor& set_layout(DataLayout layout) {
tensor_.set_data_layout(layout);
return *this;
}
BatchDescriptor& set_quantized_activation_mode(
QuantizedActivationMode quantized_activation_mode) {
quantized_activation_mode_ = quantized_activation_mode;
return *this;
}
int64_t NodesPerFeatureMap() const;
int64_t NodesAcrossFeatureMaps() const;
int64_t ElementCount() const;
static int64_t FullyConnectedWeightCount(const BatchDescriptor& input,
const BatchDescriptor& output);
static int64_t FullyConnectedBiasCount(const BatchDescriptor& output);
static BatchDescriptor DepthConcatenateOutputDescriptor(
absl::Span<const BatchDescriptor> inputs);
private:
absl::Span<const int64_t> spatial_size() const {
return AsInt64Slice(tensor_.dimensions()).subspan(2);
}
absl::Span<int64_t> spatial_size() {
return AsInt64Slice(tensor_.mutable_dimensions()).subspan(2);
}
TensorDescriptorProto tensor_;
float value_max_;
float value_min_;
QuantizedActivationMode quantized_activation_mode_;
};
std::string FilterLayoutString(FilterLayout layout);
class FilterDescriptor {
public:
FilterDescriptor();
explicit FilterDescriptor(int ndims);
~FilterDescriptor();
FilterDescriptor& set_output_feature_map_count(int64_t value) {
tensor_.set_dimensions(0, value);
return *this;
}
FilterDescriptor& set_input_feature_map_count(int64_t value) {
tensor_.set_dimensions(1, value);
return *this;
}
FilterDescriptor& set_input_filter_height(int64_t value) {
SetDim(input_filter_dims(), DimIndex::Y, value);
return *this;
}
FilterDescriptor& set_input_filter_width(int64_t value) {
SetDim(input_filter_dims(), DimIndex::X, value);
return *this;
}
FilterDescriptor& set_layout(FilterLayout layout) {
tensor_.set_filter_layout(layout);
return *this;
}
FilterDescriptor& set_spatial_dim(DimIndex dim, int64_t value) {
SetDim(input_filter_dims(), dim, value);
return *this;
}
int ndims() const { return input_filter_dims().size(); }
void CloneFrom(const FilterDescriptor& other);
std::string ToString() const;
std::string ToShortString() const;
TensorDescriptorProto ToProto(DataType data_type) const;
int64_t ComputeWeightCount() const;
int64_t bias_count() const { return output_feature_map_count(); }
int64_t output_feature_map_count() const { return tensor_.dimensions(0); }
int64_t input_feature_map_count() const { return tensor_.dimensions(1); }
int64_t input_filter_height() const {
return GetDim(input_filter_dims(), DimIndex::Y);
}
int64_t input_filter_width() const {
return GetDim(input_filter_dims(), DimIndex::X);
}
int64_t input_filter_dim(DimIndex dim) const {
return GetDim(input_filter_dims(), dim);
}
FilterLayout layout() const { return tensor_.filter_layout(); }
absl::Span<const int64_t> input_filter_dims() const {
return AsInt64Slice(tensor_.dimensions()).subspan(2);
}
std::vector<int64_t> full_dims(const FilterLayout& layout) const;
std::vector<int64_t> full_strides(const FilterLayout& layout) const;
std::vector<int64_t> vectorized_dims(const FilterLayout& layout,
int vector_size, int vector_dim) const;
std::vector<int64_t> vectorized_strides(const FilterLayout& layout,
int vector_size,
int vector_dim) const;
private:
absl::Span<int64_t> input_filter_dims() {
return AsInt64Slice(tensor_.mutable_dimensions()).subspan(2);
}
TensorDescriptorProto tensor_;
};
enum class PadAlignment : int64_t {
kDefault = 0,
kCudnnPadding,
kTensorFlowPadding,
};
std::string PadAlignmentString(PadAlignment alignment);
std::ostream& operator<<(std::ostream& str, PadAlignment alignment);
class ConvolutionDescriptor {
public:
ConvolutionDescriptor();
explicit ConvolutionDescriptor(int ndims);
~ConvolutionDescriptor();
std::string ToString() const;
std::string ToShortString() const;
ConvolutionDescriptorProto ToProto() const { return proto_; }
ConvolutionDescriptor& set_zero_padding_height(int64_t value) {
SetDim(padding(), DimIndex::Y, value);
return *this;
}
ConvolutionDescriptor& set_zero_padding_width(int64_t value) {
SetDim(padding(), DimIndex::X, value);
return *this;
}
ConvolutionDescriptor& set_zero_padding(DimIndex dim, int64_t value) {
SetDim(padding(), dim, value);
return *this;
}
ConvolutionDescriptor& set_vertical_filter_stride(int64_t value) {
SetDim(strides(), DimIndex::Y, value);
return *this;
}
ConvolutionDescriptor& set_horizontal_filter_stride(int64_t value) {
SetDim(strides(), DimIndex::X, value);
return *this;
}
ConvolutionDescriptor& set_filter_stride(DimIndex dim, int64_t value) {
SetDim(strides(), dim, value);
return *this;
}
ConvolutionDescriptor& set_vertical_dilation_rate(int64_t value) {
SetDim(dilations(), DimIndex::Y, value);
return *this;
}
ConvolutionDescriptor& set_horizontal_dilation_rate(int64_t value) {
SetDim(dilations(), DimIndex::X, value);
return *this;
}
ConvolutionDescriptor& set_dilation_rate(DimIndex dim, int64_t value) {
SetDim(dilations(), dim, value);
return *this;
}
ConvolutionDescriptor& set_group_count(int group_count) {
proto_.set_group_count(group_count);
return *this;
}
ConvolutionDescriptor& set_convolution_not_crosscorr(bool conv) {
proto_.set_convolution_mode(conv ? ConvolutionMode::CONVOLUTION
: ConvolutionMode::CROSS_CORRELATION);
return *this;
}
ConvolutionDescriptor& set_name(const std::string& name) {
proto_.set_name(name);
return *this;
}
int64_t zero_padding_height() const { return GetDim(padding(), DimIndex::Y); }
int64_t zero_padding_width() const { return GetDim(padding(), DimIndex::X); }
int64_t vertical_filter_stride() const {
return GetDim(strides(), DimIndex::Y);
}
int64_t horizontal_filter_stride() const {
return GetDim(strides(), DimIndex::X);
}
int64_t vertical_dilation_rate() const {
return GetDim(dilations(), DimIndex::Y);
}
int64_t horizontal_dilation_rate() const {
return GetDim(dilations(), DimIndex::X);
}
int zero_padding(DimIndex dim) const { return GetDim(padding(), dim); }
int filter_stride(DimIndex dim) const { return GetDim(strides(), dim); }
int dilation_rate(DimIndex dim) const { return GetDim(dilations(), dim); }
PadAlignment pad_alignment() const { return PadAlignment::kDefault; }
int group_count() const { return proto_.group_count(); }
int ndims() const { return padding().size(); }
bool convolution_not_crosscorr() const {
return proto_.convolution_mode() == ConvolutionMode::CONVOLUTION;
}
absl::Span<const int64_t> strides() const {
return AsInt64Slice(proto_.strides());
}
absl::Span<const int64_t> dilations() const {
return AsInt64Slice(proto_.dilations());
}
absl::Span<const int64_t> padding() const {
return AsInt64Slice(proto_.paddings());
}
std::string name() const { return proto_.name(); }
private:
absl::Span<int64_t> strides() {
return AsInt64Slice(proto_.mutable_strides());
}
absl::Span<int64_t> dilations() {
return AsInt64Slice(proto_.mutable_dilations());
}
absl::Span<int64_t> padding() {
return AsInt64Slice(proto_.mutable_paddings());
}
ConvolutionDescriptorProto proto_;
};
enum class PoolingMode : int64_t {
kMaximum,
kAverage,
};
enum class SpaceConcatenateMode : int64_t {
XDirection,
YDirection,
};
std::string ShortPoolingModeString(PoolingMode mode);
class PoolingDescriptor {
public:
PoolingDescriptor();
explicit PoolingDescriptor(int ndims);
PoolingDescriptor& set_pooling_mode(PoolingMode value) {
mode_ = value;
return *this;
}
PoolingDescriptor& set_window_height(int64_t value) {
SetDim(&window_, DimIndex::Y, value);
return *this;
}
PoolingDescriptor& set_window_width(int64_t value) {
SetDim(&window_, DimIndex::X, value);
return *this;
}
PoolingDescriptor& set_window(DimIndex dim, int64_t value) {
SetDim(&window_, dim, value);
return *this;
}
PoolingDescriptor& set_vertical_padding(int64_t value) {
SetDim(&padding_, DimIndex::Y, value);
return *this;
}
PoolingDescriptor& set_horizontal_padding(int64_t value) {
SetDim(&padding_, DimIndex::X, value);
return *this;
}
PoolingDescriptor& set_padding(DimIndex dim, int64_t value) {
SetDim(&padding_, dim, value);
return *this;
}
PoolingDescriptor& set_vertical_stride(int64_t value) {
SetDim(&strides_, DimIndex::Y, value);
return *this;
}
PoolingDescriptor& set_horizontal_stride(int64_t value) {
SetDim(&strides_, DimIndex::X, value);
return *this;
}
PoolingDescriptor& set_stride(DimIndex dim, int64_t value) {
SetDim(&strides_, dim, value);
return *this;
}
PoolingDescriptor& set_propagate_nans(bool value) {
propagate_nans_ = value;
return *this;
}
PoolingDescriptor& set_name(const std::string& name) {
name_ = name;
return *this;
}
int ndims() const { return ndims_; }
void CloneFrom(const PoolingDescriptor& other);
std::string ToString() const;
std::string ToShortString() const;
PoolingMode mode() const { return mode_; }
int64_t window_height() const { return GetDim(window_, DimIndex::Y); }
int64_t window_width() const { return GetDim(window_, DimIndex::X); }
int64_t window(DimIndex dim) const { return GetDim(window_, dim); }
int64_t vertical_padding() const { return GetDim(padding_, DimIndex::Y); }
int64_t horizontal_padding() const { return GetDim(padding_, DimIndex::X); }
int64_t padding(DimIndex dim) const { return GetDim(padding_, dim); }
int64_t vertical_stride() const { return GetDim(strides_, DimIndex::Y); }
int64_t horizontal_stride() const { return GetDim(strides_, DimIndex::X); }
int64_t stride(DimIndex dim) const { return GetDim(strides_, dim); }
absl::Span<const int64_t> window() const { return window_; }
absl::Span<const int64_t> padding() const { return padding_; }
absl::Span<const int64_t> strides() const { return strides_; }
bool propagate_nans() const { return propagate_nans_; }
std::string name() const { return name_; }
private:
PoolingMode mode_;
int ndims_;
bool propagate_nans_;
std::string name_;
std::vector<int64_t> window_;
std::vector<int64_t> padding_;
std::vector<int64_t> strides_;
};
class AlgorithmDesc {
public:
typedef int64_t Index;
AlgorithmDesc() : AlgorithmDesc(0, false, std::nullopt) {}
explicit AlgorithmDesc(AlgorithmProto proto) : proto_(std::move(proto)) {}
AlgorithmDesc(Index algo_id, bool use_tensor_ops)
: AlgorithmDesc(algo_id, use_tensor_ops, std::nullopt) {}
AlgorithmDesc(Index algo_id, bool use_tensor_ops,
std::optional<uint64_t> workspace_size) {
proto_.set_is_cudnn_frontend(false);
proto_.set_algo_id(algo_id);
proto_.set_math_type(use_tensor_ops ? AlgorithmProto::TENSOR_OP_MATH
: AlgorithmProto::DEFAULT_MATH);
if (workspace_size) {
proto_.mutable_workspace_size()->set_value(*workspace_size);
}
}
AlgorithmDesc(int64_t engine_id,
const std::vector<std::pair<int64_t, int64_t>>& tuning_knobs,
std::optional<uint64_t> workspace_size);
bool is_cudnn_frontend() const { return proto_.is_cudnn_frontend(); }
bool tensor_ops_enabled() const {
return proto_.math_type() == AlgorithmProto::TENSOR_OP_MATH;
}
std::optional<uint64_t> workspace_size() const {
if (proto_.has_workspace_size()) {
return proto_.workspace_size().value();
}
return std::nullopt;
}
Index algo_id() const { return proto_.algo_id(); }
std::vector<std::pair<int64_t, int64_t>> TuningKnobs() const;
bool operator==(const AlgorithmDesc& other) const;
uint64_t hash() const;
template <typename H>
friend H AbslHashValue(H h, const AlgorithmDesc& algo_desc);
AlgorithmProto ToProto() const { return proto_; }
std::string ToString() const;
private:
AlgorithmProto proto_;
};
template <typename H>
H AbslHashValue(H h, const AlgorithmDesc& algo_desc) {
return H::combine(std::move(h), algo_desc.hash());
}
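// Illustrative sketch (not part of the original header): the two common ways
// an AlgorithmDesc is built. A legacy descriptor carries an algorithm id plus
// a tensor-op flag; a cuDNN-frontend descriptor is identified by an engine id
// and tuning knobs (the accompanying unit test expects
// "eng17{k1=0,k3=1,k12=1}" for engine 17 with knobs {12:1, 1:0, 3:1}). The
// concrete values here are arbitrary.
inline AlgorithmDesc MakeExampleLegacyAlgorithmDesc() {
  return AlgorithmDesc(/*algo_id=*/7, /*use_tensor_ops=*/true,
                       /*workspace_size=*/uint64_t{1} << 20);
}
inline AlgorithmDesc MakeExampleFrontendAlgorithmDesc() {
  return AlgorithmDesc(/*engine_id=*/17, {{12, 1}, {1, 0}, {3, 1}},
                       /*workspace_size=*/std::nullopt);
}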
class ProfileResult {
public:
bool is_valid() const {
return algorithm_.ha | #include "xla/stream_executor/dnn.h"
#include <tuple>
#include <vector>
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(DnnTest, AlgorithmDescToString) {
dnn::AlgorithmDesc desc(17, {{12, 1}, {1, 0}, {3, 1}}, 0);
EXPECT_EQ(desc.ToString(), "eng17{k1=0,k3=1,k12=1}");
}
TEST(DnnTest, VersionInfoComparisonOperators) {
std::vector<std::tuple<int, int, int>> vs;
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
for (int k = 0; k < 4; k++) {
vs.push_back(std::make_tuple(i, j, k));
}
}
}
for (const auto& a : vs) {
for (const auto& b : vs) {
auto [a1, a2, a3] = a;
auto [b1, b2, b3] = b;
dnn::VersionInfo va(a1, a2, a3);
dnn::VersionInfo vb(b1, b2, b3);
EXPECT_EQ((a == b), va == vb);
EXPECT_EQ((a != b), va != vb);
EXPECT_EQ((a < b), va < vb);
EXPECT_EQ((a <= b), va <= vb);
EXPECT_EQ((a > b), va > vb);
EXPECT_EQ((a >= b), va >= vb);
}
}
}
}
} |
1,826 | cpp | tensorflow/tensorflow | device_memory_handle | third_party/xla/xla/stream_executor/device_memory_handle.cc | third_party/xla/xla/stream_executor/device_memory_handle_test.cc | #ifndef XLA_STREAM_EXECUTOR_DEVICE_MEMORY_HANDLE_H_
#define XLA_STREAM_EXECUTOR_DEVICE_MEMORY_HANDLE_H_
#include <utility>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
class DeviceMemoryHandle {
public:
DeviceMemoryHandle() : memory_(), executor_(nullptr) {}
DeviceMemoryHandle(StreamExecutor *executor, DeviceMemoryBase memory);
~DeviceMemoryHandle();
DeviceMemoryHandle(DeviceMemoryHandle &&other) noexcept;
DeviceMemoryHandle &operator=(DeviceMemoryHandle &&other) noexcept;
const DeviceMemoryBase &memory() const { return memory_; }
DeviceMemoryBase *memory_ptr() { return &memory_; }
private:
void Free();
DeviceMemoryBase memory_;
StreamExecutor *executor_;
};
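// Usage sketch (illustrative, not part of the original header): the handle
// frees the wrapped allocation through the owning StreamExecutor when it goes
// out of scope, so callers never pair Allocate/Deallocate by hand. `executor`
// and `byte_size` are assumed to be supplied by the caller, and FillBuffer is
// a hypothetical consumer of the allocation.
//
//   DeviceMemoryHandle scratch(executor, executor->Allocate(byte_size));
//   if (scratch.memory().is_null()) { /* handle allocation failure */ }
//   FillBuffer(scratch.memory_ptr());
//   // On scope exit (or after being moved from), the memory is deallocated
//   // exactly once.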
}
#endif
#include "xla/stream_executor/device_memory_handle.h"
#include <utility>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
DeviceMemoryHandle::DeviceMemoryHandle(StreamExecutor *executor,
DeviceMemoryBase memory)
: memory_(std::move(memory)), executor_(executor) {}
DeviceMemoryHandle::DeviceMemoryHandle(DeviceMemoryHandle &&other) noexcept
: memory_(std::move(other.memory_)), executor_(other.executor_) {
other.memory_ = DeviceMemoryBase();
}
DeviceMemoryHandle::~DeviceMemoryHandle() { Free(); }
void DeviceMemoryHandle::Free() {
if (!memory_.is_null()) {
executor_->Deallocate(&memory_);
}
}
DeviceMemoryHandle &DeviceMemoryHandle::operator=(
DeviceMemoryHandle &&other) noexcept {
Free();
memory_ = std::move(other.memory_);
other.memory_ = DeviceMemoryBase();
executor_ = other.executor_;
return *this;
}
} | #include "xla/stream_executor/device_memory_handle.h"
#include <utility>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/mock_stream_executor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(DeviceMemoryHandle, NullMemoryNoDeallocate) {
DeviceMemoryBase null_memory;
MockStreamExecutor executor;
EXPECT_CALL(executor, Deallocate).Times(0);
{ DeviceMemoryHandle releaser(&executor, null_memory); }
}
TEST(DeviceMemoryHandle, Deallocates) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{ DeviceMemoryHandle releaser(&executor, memory); }
}
TEST(DeviceMemoryHandle, MoveDeallocatesOnce) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{
DeviceMemoryHandle releaser(&executor, memory);
DeviceMemoryHandle releaser_moved(std::move(releaser));
}
}
TEST(DeviceMemoryHandle, MoveAssignmentDeallocatesOnce) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{
DeviceMemoryHandle releaser(&executor, memory);
DeviceMemoryHandle releaser2;
releaser2 = std::move(releaser);
}
}
}
} |
1,827 | cpp | tensorflow/tensorflow | tf_allocator_adapter | third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter.cc | third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter_test.cc | #ifndef XLA_STREAM_EXECUTOR_INTEGRATIONS_TF_ALLOCATOR_ADAPTER_H_
#define XLA_STREAM_EXECUTOR_INTEGRATIONS_TF_ALLOCATOR_ADAPTER_H_
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
class TfAllocatorAdapter : public DeviceMemoryAllocator {
public:
TfAllocatorAdapter(tsl::Allocator *wrapped, Stream *stream);
TfAllocatorAdapter(tsl::Allocator *wrapped, Platform *platform);
~TfAllocatorAdapter() override;
absl::StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64_t size,
bool retry_on_failure,
int64_t memory_space) override;
absl::Status Deallocate(int device_ordinal, DeviceMemoryBase mem) override;
bool AllowsAsynchronousDeallocation() const override { return true; }
absl::StatusOr<Stream *> GetStream(int device_ordinal) override;
absl::StatusOr<tsl::Allocator *> GetAllocator(int device_ordinal);
private:
tsl::Allocator *wrapped_;
Stream *stream_;
};
class MultiDeviceAdapter : public DeviceMemoryAllocator {
public:
struct AllocatorInfo {
std::unique_ptr<tsl::Allocator> allocator;
Stream *stream;
int64_t memory_space;
std::optional<int> device_ordinal = std::nullopt;
AllocatorInfo(std::unique_ptr<tsl::Allocator> allocator, Stream *stream,
int64_t memory_space,
std::optional<int> device_ordinal = std::nullopt)
: allocator(std::move(allocator)),
stream(stream),
memory_space(memory_space),
device_ordinal(device_ordinal) {}
};
MultiDeviceAdapter(const Platform *platform,
std::vector<AllocatorInfo> tf_allocators)
: DeviceMemoryAllocator(platform) {
tf_allocators_.reserve(tf_allocators.size());
for (AllocatorInfo &info : tf_allocators) {
auto &per_device_allocators =
memory_space_to_per_device_allocators_[info.memory_space];
int device_ordinal = info.device_ordinal.has_value()
? *info.device_ordinal
: info.stream->parent()->device_ordinal();
if (per_device_allocators.size() <= device_ordinal) {
per_device_allocators.resize(device_ordinal + 1);
}
CHECK(!per_device_allocators[device_ordinal]);
per_device_allocators[device_ordinal] =
std::make_unique<TfAllocatorAdapter>(info.allocator.get(),
info.stream);
tf_allocators_.push_back(std::move(info.allocator));
}
}
absl::StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64_t size,
bool retry_on_failure,
int64_t memory_space) override {
auto it = memory_space_to_per_device_allocators_.find(memory_space);
CHECK(it != memory_space_to_per_device_allocators_.end());
CHECK_LT(device_ordinal, it->second.size());
TF_ASSIGN_OR_RETURN(
auto result, it->second[device_ordinal]->Allocate(
device_ordinal, size, retry_on_failure, memory_space));
absl::MutexLock lock(&mu_);
buffer_memory_spaces_[{device_ordinal, result->opaque()}] = memory_space;
return result;
}
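  // Deallocation consults buffer_memory_spaces_ to recover which memory space
  // a buffer was allocated from; buffers that were never recorded fall back to
  // the memory-space-0 allocator for that device.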
absl::Status Deallocate(int device_ordinal, DeviceMemoryBase mem) override {
if (mem.opaque() == nullptr) return absl::OkStatus();
int64_t memory_space;
{
absl::MutexLock lock(&mu_);
auto it = buffer_memory_spaces_.find({device_ordinal, mem.opaque()});
if (it == buffer_memory_spaces_.end()) {
return memory_space_to_per_device_allocators_[0][device_ordinal]
->Deallocate(device_ordinal, mem);
}
memory_space = it->second;
buffer_memory_spaces_.erase(it);
}
auto it = memory_space_to_per_device_allocators_.find(memory_space);
CHECK(it != memory_space_to_per_device_allocators_.end());
CHECK_LT(device_ordinal, it->second.size());
return it->second[device_ordinal]->Deallocate(device_ordinal, mem);
}
bool AllowsAsynchronousDeallocation() const override { return true; }
absl::StatusOr<Stream *> GetStream(int device_ordinal) override {
return memory_space_to_per_device_allocators_[0][device_ordinal]->GetStream(
device_ordinal);
}
absl::StatusOr<tsl::Allocator *> GetAllocator(int device_ordinal) {
return memory_space_to_per_device_allocators_[0][device_ordinal]
->GetAllocator(device_ordinal);
}
private:
absl::flat_hash_map<int64_t, std::vector<std::unique_ptr<TfAllocatorAdapter>>>
memory_space_to_per_device_allocators_;
absl::Mutex mu_;
absl::flat_hash_map<std::pair<int, void *>, int64_t> buffer_memory_spaces_
ABSL_GUARDED_BY(mu_);
std::vector<std::unique_ptr<tsl::Allocator>> tf_allocators_;
};
}
#endif
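// Usage sketch (illustrative, mirroring the accompanying unit test rather than
// prescribing an API): one AllocatorInfo is registered per (device ordinal,
// memory space) pair, and Allocate/Deallocate then route by those two keys.
// `platform`, `stream`, and the tsl::Allocator instances are assumed to exist;
// the allocator names below are placeholders.
//
//   std::vector<stream_executor::MultiDeviceAdapter::AllocatorInfo> infos;
//   infos.emplace_back(std::move(device0_default_allocator), stream,
//                      /*memory_space=*/0, /*device_ordinal=*/0);
//   infos.emplace_back(std::move(device0_collective_allocator), stream,
//                      /*memory_space=*/1, /*device_ordinal=*/0);
//   auto adapter = std::make_unique<stream_executor::MultiDeviceAdapter>(
//       platform, std::move(infos));
//   TF_ASSIGN_OR_RETURN(auto mem,
//                       adapter->Allocate(/*device_ordinal=*/0, /*size=*/1024,
//                                         /*retry_on_failure=*/false,
//                                         /*memory_space=*/1));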
#include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/errors.h"
namespace stream_executor {
TfAllocatorAdapter::TfAllocatorAdapter(tsl::Allocator *wrapped, Stream *stream)
: DeviceMemoryAllocator(stream->parent()->GetPlatform()),
wrapped_(wrapped),
stream_(stream) {}
TfAllocatorAdapter::TfAllocatorAdapter(tsl::Allocator *wrapped,
Platform *platform)
: DeviceMemoryAllocator(platform), wrapped_(wrapped), stream_(nullptr) {}
TfAllocatorAdapter::~TfAllocatorAdapter() {}
absl::StatusOr<OwningDeviceMemory> TfAllocatorAdapter::Allocate(
int device_ordinal, uint64_t size, bool retry_on_failure,
int64_t memory_space) {
tsl::AllocationAttributes attrs;
attrs.retry_on_failure = retry_on_failure;
void *data = nullptr;
if (size != 0) {
data =
wrapped_->AllocateRaw(tsl::Allocator::kAllocatorAlignment, size, attrs);
if (data == nullptr) {
return absl::ResourceExhaustedError(absl::StrCat(
"Out of memory while trying to allocate ", size, " bytes."));
}
}
return OwningDeviceMemory(DeviceMemoryBase(data, size), device_ordinal, this);
}
absl::Status TfAllocatorAdapter::Deallocate(int device_ordinal,
DeviceMemoryBase mem) {
wrapped_->DeallocateRaw(mem.opaque());
return absl::OkStatus();
}
absl::StatusOr<Stream *> TfAllocatorAdapter::GetStream(int device_ordinal) {
CHECK_EQ(stream_->parent()->device_ordinal(), device_ordinal);
return stream_;
}
absl::StatusOr<tsl::Allocator *> TfAllocatorAdapter::GetAllocator(
int device_ordinal) {
if (stream_ == nullptr) {
return absl::UnavailableError("stream_ is null for TfAllocatorAdapter.");
}
if (stream_->parent()->device_ordinal() != device_ordinal) {
return absl::InternalError(
absl::StrCat("stream_->parent()->device_ordinal() ",
stream_->parent()->device_ordinal(),
" not equal to device_ordinal ", device_ordinal));
}
return wrapped_;
}
} | #include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_set.h"
#include "absl/log/check.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
class TestAllocator : public tsl::Allocator {
public:
explicit TestAllocator(
size_t start_address,
std::shared_ptr<absl::flat_hash_set<void*>> allocations = nullptr)
: start_address_(start_address), allocations_(allocations) {
if (allocations_ == nullptr) {
allocations_ = std::make_shared<absl::flat_hash_set<void*>>();
}
}
std::string Name() override { return "test"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
void* ptr = reinterpret_cast<void*>(++start_address_);
allocations_->insert(ptr);
return ptr;
}
void DeallocateRaw(void* ptr) override {
auto it = allocations_->find(ptr);
if (it == allocations_->end()) {
ADD_FAILURE() << "Allocation not found (double free?)";
} else {
allocations_->erase(it);
}
}
private:
size_t start_address_;
std::shared_ptr<absl::flat_hash_set<void*>> allocations_;
};
TEST(MultiDeviceAdapter, UsesCorrectAllocator) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
                          xla::PlatformUtil::GetStreamExecutors(platform));
TF_ASSERT_OK_AND_ASSIGN(auto stream, executors[0]->CreateStream());
std::vector<se::MultiDeviceAdapter::AllocatorInfo> infos;
infos.emplace_back(std::make_unique<TestAllocator>(0x1000), stream.get(),
0, 0);
infos.emplace_back(std::make_unique<TestAllocator>(0x2000), stream.get(),
0, 1);
infos.emplace_back(std::make_unique<TestAllocator>(0x3000), stream.get(),
1, 0);
infos.emplace_back(std::make_unique<TestAllocator>(0x4000), stream.get(),
1, 1);
std::unique_ptr<se::DeviceMemoryAllocator> allocator =
std::make_unique<se::MultiDeviceAdapter>(platform, std::move(infos));
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff0,
allocator->Allocate(0, 4, false, 0));
CHECK_EQ(reinterpret_cast<size_t>(buff0->opaque()), 0x1001);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff1,
allocator->Allocate(0, 4, false, 0));
CHECK_EQ(reinterpret_cast<size_t>(buff1->opaque()), 0x1002);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff2,
allocator->Allocate(0, 4, false, 1));
CHECK_EQ(reinterpret_cast<size_t>(buff2->opaque()), 0x3001);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff3,
allocator->Allocate(1, 4, false, 0));
CHECK_EQ(reinterpret_cast<size_t>(buff3->opaque()), 0x2001);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff4,
allocator->Allocate(1, 4, false, 1));
CHECK_EQ(reinterpret_cast<size_t>(buff4->opaque()), 0x4001);
}
TEST(MultiDeviceAdapter, DeallocationWithDifferentAllocator) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
xla::PlatformUtil::GetStreamExecutors(platform));
TF_ASSERT_OK_AND_ASSIGN(auto stream, executors[0]->CreateStream());
std::shared_ptr<absl::flat_hash_set<void*>> allocations =
std::make_shared<absl::flat_hash_set<void*>>();
std::vector<se::MultiDeviceAdapter::AllocatorInfo> info_allocator;
info_allocator.emplace_back(
std::make_unique<TestAllocator>(0x1000, allocations), stream.get(),
0, 0);
std::unique_ptr<se::DeviceMemoryAllocator> allocator =
std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(info_allocator));
std::vector<se::MultiDeviceAdapter::AllocatorInfo> info_deallocator;
info_deallocator.emplace_back(
std::make_unique<TestAllocator>(0x1000, allocations), stream.get(),
0, 0);
std::unique_ptr<se::DeviceMemoryAllocator> deallocator =
std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(info_deallocator));
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff0,
allocator->Allocate(0, 4, false, 0));
CHECK_EQ(allocations->size(), 1);
CHECK_EQ(reinterpret_cast<size_t>(buff0->opaque()), 0x1001);
TF_CHECK_OK(deallocator->Deallocate(0, buff0.cref()));
CHECK_EQ(allocations->size(), 0);
allocations->insert(buff0->opaque());
} |
1,828 | cpp | tensorflow/tensorflow | host_kernel | third_party/xla/xla/stream_executor/host/host_kernel.cc | third_party/xla/xla/stream_executor/host/host_kernel_test.cc | #ifndef XLA_STREAM_EXECUTOR_HOST_HOST_KERNEL_H_
#define XLA_STREAM_EXECUTOR_HOST_HOST_KERNEL_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/chain.h"
#include "tsl/platform/threadpool.h"
namespace stream_executor::host {
class HostExecutor;
class HostKernel : public Kernel {
public:
using Task = absl::AnyInvocable<void()>;
using TaskRunner = absl::AnyInvocable<void(Task)>;
using LaunchEvent = tsl::Chain;
class KernelFunction {
public:
virtual ~KernelFunction() = default;
virtual SE_HOST_Kernel* kernel() const = 0;
};
class KernelFunctionPtr final : public KernelFunction {
public:
explicit KernelFunctionPtr(SE_HOST_Kernel* ptr) : ptr_(ptr) {}
SE_HOST_Kernel* kernel() const override { return ptr_; }
private:
SE_HOST_Kernel* ptr_;
};
explicit HostKernel(std::shared_ptr<tsl::thread::ThreadPool> thread_pool);
HostKernel(unsigned arity, SE_HOST_Kernel* kernel,
std::shared_ptr<tsl::thread::ThreadPool> thread_pool = nullptr);
absl::Status Launch(const ThreadDim& thread_dims,
absl::Span<const DeviceMemoryBase> buffers) const;
absl::Status Launch(const ThreadDim& thread_dims,
absl::Span<const SE_HOST_KernelArg> args) const;
tsl::AsyncValueRef<LaunchEvent> Launch(
const ThreadDim& thread_dims, absl::Span<const DeviceMemoryBase> buffers,
TaskRunner task_runner) const;
tsl::AsyncValueRef<LaunchEvent> Launch(
const ThreadDim& thread_dims, absl::Span<const SE_HOST_KernelArg> args,
TaskRunner task_runner) const;
absl::StatusOr<int32_t> GetMaxOccupiedBlocksPerCore(ThreadDim,
size_t) const override {
return 1;
};
void SetArity(unsigned arity) { arity_ = arity; };
unsigned Arity() const override { return arity_; };
template <typename T,
std::enable_if_t<std::is_base_of_v<KernelFunction, T>>* = nullptr>
void SetKernelFunction(std::unique_ptr<T> function) {
function_ = std::move(function);
kernel_ = function_->kernel();
}
private:
std::unique_ptr<KernelFunction> function_;
SE_HOST_Kernel* kernel_;
unsigned arity_;
std::shared_ptr<tsl::thread::ThreadPool> thread_pool_;
};
inline const HostKernel* AsHostKernel(const Kernel* kernel) {
return static_cast<const HostKernel*>(kernel);
}
inline HostKernel* AsHostKernel(Kernel* kernel) {
return static_cast<HostKernel*>(kernel);
}
}
#endif
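// Usage sketch (illustrative, based on the accompanying unit tests): a kernel
// can be launched synchronously over the whole thread grid, or asynchronously
// by handing per-thread tasks to a caller-supplied TaskRunner. `args`,
// `thread_pool`, and the AddI32 / ToCopyableTask helpers are assumed to come
// from the surrounding code (the helpers appear in the unit test).
//
//   HostKernel kernel(/*arity=*/3, AddI32);
//   TF_RETURN_IF_ERROR(kernel.Launch(ThreadDim(4), args));      // synchronous
//
//   HostKernel::TaskRunner runner = [&](HostKernel::Task task) {
//     thread_pool->Schedule(ToCopyableTask(std::move(task)));
//   };
//   auto event = kernel.Launch(ThreadDim(4, 4, 4), args, std::move(runner));
//   tsl::BlockUntilReady(event);                                 // asynchronous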
#include "xla/stream_executor/host/host_kernel.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/threadpool.h"
namespace stream_executor::host {
using LaunchEvent = HostKernel::LaunchEvent;
static tsl::AsyncValueRef<LaunchEvent> OkLaunchEvent() {
static tsl::AsyncValueOwningRef<LaunchEvent>* event = [] {
auto* storage = new tsl::internal::AsyncValueStorage<LaunchEvent>();
return new tsl::AsyncValueOwningRef<LaunchEvent>(
tsl::MakeAvailableAsyncValueRef<LaunchEvent>(*storage));
}();
return event->AsRef();
}
static absl::InlinedVector<SE_HOST_KernelArg, 8> ConvertBuffersToKernelArgs(
absl::Span<const DeviceMemoryBase> buffers) {
absl::InlinedVector<SE_HOST_KernelArg, 8> args(buffers.size());
for (size_t i = 0; i < buffers.size(); ++i) {
args[i].data = const_cast<void*>(buffers[i].opaque());
args[i].size = buffers[i].size();
}
return args;
}
namespace {
class HostKernelExecuteState
: public tsl::ReferenceCounted<HostKernelExecuteState> {
public:
HostKernelExecuteState(HostKernel::TaskRunner task_runner,
SE_HOST_Kernel* kernel, ThreadDim thread_dims,
absl::Span<const SE_HOST_KernelArg> args);
void Notify(absl::Status status);
void CallSync(uint64_t task_index);
void CallAsync(uint64_t start_index, uint64_t end_index);
tsl::AsyncValueRef<LaunchEvent> event() const { return event_; }
private:
SE_HOST_KernelThread Delinearize(uint64_t task_index);
HostKernel::TaskRunner task_runner_;
size_t num_tasks_;
SE_HOST_Kernel* kernel_;
SE_HOST_KernelThreadDim thread_dims_;
absl::InlinedVector<SE_HOST_KernelArg, 8> args_;
std::atomic<bool> abort_;
absl::Mutex abort_mutex_;
absl::Status abort_status_ ABSL_GUARDED_BY(abort_mutex_);
std::atomic<int64_t> counter_;
tsl::AsyncValueRef<LaunchEvent> event_;
};
}
HostKernel::HostKernel(std::shared_ptr<tsl::thread::ThreadPool> thread_pool)
: thread_pool_(thread_pool) {
}
HostKernel::HostKernel(unsigned arity, SE_HOST_Kernel* kernel,
std::shared_ptr<tsl::thread::ThreadPool> thread_pool)
: function_(std::make_unique<KernelFunctionPtr>(kernel)),
kernel_(function_->kernel()),
arity_(arity),
thread_pool_(thread_pool) {}
absl::Status HostKernel::Launch(
const ThreadDim& thread_dims,
absl::Span<const DeviceMemoryBase> buffers) const {
return Launch(thread_dims, ConvertBuffersToKernelArgs(buffers));
}
absl::Status HostKernel::Launch(
const ThreadDim& thread_dims,
absl::Span<const SE_HOST_KernelArg> args) const {
SE_HOST_KernelThreadDim kernel_thread_dims = {
thread_dims.x,
thread_dims.y,
thread_dims.z,
};
for (uint64_t z = 0; z < thread_dims.z; ++z) {
for (uint64_t y = 0; y < thread_dims.y; ++y) {
for (uint64_t x = 0; x < thread_dims.x; ++x) {
SE_HOST_KernelThread kernel_thread = {x, y, z};
SE_HOST_KernelCallFrame call_frame = {
&kernel_thread_dims, &kernel_thread, args.size(), args.data()};
SE_HOST_KernelError* error = (*kernel_)(&call_frame);
if (ABSL_PREDICT_FALSE(error != nullptr)) {
return absl::InternalError("Failed to call host kernel");
}
}
}
}
return absl::OkStatus();
}
tsl::AsyncValueRef<LaunchEvent> HostKernel::Launch(
const ThreadDim& thread_dims, absl::Span<const DeviceMemoryBase> buffers,
TaskRunner task_runner) const {
return Launch(thread_dims, ConvertBuffersToKernelArgs(buffers),
std::move(task_runner));
}
tsl::AsyncValueRef<LaunchEvent> HostKernel::Launch(
const ThreadDim& thread_dims, absl::Span<const SE_HOST_KernelArg> args,
TaskRunner task_runner) const {
size_t num_tasks = thread_dims.x * thread_dims.y * thread_dims.z;
CHECK_GT(num_tasks, 0) << "Number of tasks must be positive";
if (ABSL_PREDICT_TRUE(num_tasks == 1)) {
absl::Status launched = Launch(thread_dims, args);
return ABSL_PREDICT_TRUE(launched.ok())
? OkLaunchEvent()
: tsl::MakeErrorAsyncValueRef(std::move(launched));
}
auto state = tsl::MakeRef<HostKernelExecuteState>(std::move(task_runner),
kernel_, thread_dims, args);
state->CallAsync(0, num_tasks);
return state->event();
}
HostKernelExecuteState::HostKernelExecuteState(
    HostKernel::TaskRunner task_runner, SE_HOST_Kernel* kernel,
ThreadDim thread_dims, absl::Span<const SE_HOST_KernelArg> args)
: task_runner_(std::move(task_runner)),
num_tasks_(thread_dims.x * thread_dims.y * thread_dims.z),
kernel_(kernel),
thread_dims_({thread_dims.x, thread_dims.y, thread_dims.z}),
args_(args.begin(), args.end()),
abort_(false),
counter_(num_tasks_),
event_(tsl::MakeConstructedAsyncValueRef<LaunchEvent>()) {}
void HostKernelExecuteState::Notify(absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
absl::MutexLock lock(&abort_mutex_);
abort_.store(true, std::memory_order_relaxed);
abort_status_.Update(std::move(status));
}
bool is_done = counter_.load(std::memory_order_relaxed) == 1 ||
counter_.fetch_sub(1, std::memory_order_relaxed) == 1;
if (ABSL_PREDICT_TRUE(!is_done)) return;
if (ABSL_PREDICT_FALSE(abort_.load(std::memory_order_relaxed))) {
absl::MutexLock lock(&abort_mutex_);
event_.SetError(std::move(abort_status_));
} else {
event_.SetStateConcrete();
}
}
void HostKernelExecuteState::CallSync(uint64_t task_index) {
CHECK_LT(task_index, num_tasks_) << "Task index out of range";
if (ABSL_PREDICT_FALSE(abort_.load(std::memory_order_relaxed))) {
Notify(absl::OkStatus());
return;
}
SE_HOST_KernelThread kernel_thread = Delinearize(task_index);
SE_HOST_KernelCallFrame call_frame = {&thread_dims_, &kernel_thread,
args_.size(), args_.data()};
SE_HOST_KernelError* error = (*kernel_)(&call_frame);
if (ABSL_PREDICT_TRUE(error == nullptr)) {
Notify(absl::OkStatus());
} else {
Notify(absl::InternalError(
absl::StrFormat("Failed to call host kernel: x=%d, y=%d, z=%d",
kernel_thread.x, kernel_thread.y, kernel_thread.z)));
}
}
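// Recursively splits [start_index, end_index) in half, scheduling the upper
// half on the task runner and keeping the lower half on the current thread, so
// task submission itself fans out in parallel; the final single index is then
// executed inline via CallSync.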
void HostKernelExecuteState::CallAsync(uint64_t start_index,
uint64_t end_index) {
CHECK_LT(start_index, end_index) << "Invalid task index range";
while (end_index - start_index > 1) {
uint64_t mid_index = (start_index + end_index) / 2;
task_runner_([self = tsl::FormRef(this), mid_index, end_index] {
self->CallAsync(mid_index, end_index);
});
end_index = mid_index;
}
CallSync(start_index);
}
SE_HOST_KernelThread HostKernelExecuteState::Delinearize(uint64_t task_index) {
uint64_t stride_z = thread_dims_.y * thread_dims_.x;
uint64_t stride_y = thread_dims_.x;
uint64_t z = task_index / stride_z;
task_index = task_index % stride_z;
uint64_t y = task_index / stride_y;
task_index = task_index % stride_y;
uint64_t x = task_index;
return SE_HOST_KernelThread{x, y, z};
}
} | #include "xla/stream_executor/host/host_kernel.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/types/span.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace stream_executor::host {
static auto ToCopyableTask(HostKernel::Task task) {
return [shared_task = std::make_shared<decltype(task)>(std::move(task))] {
(*shared_task)();
};
}
static SE_HOST_KernelError* AddI32(const SE_HOST_KernelCallFrame* call_frame) {
const SE_HOST_KernelArg& lhs = call_frame->args[0];
const SE_HOST_KernelArg& rhs = call_frame->args[1];
const SE_HOST_KernelArg& out = call_frame->args[2];
int32_t* lhs_ptr = reinterpret_cast<int32_t*>(lhs.data);
int32_t* rhs_ptr = reinterpret_cast<int32_t*>(rhs.data);
int32_t* out_ptr = reinterpret_cast<int32_t*>(out.data);
const auto zstep = call_frame->thread_dims->x * call_frame->thread_dims->y;
const auto ystep = call_frame->thread_dims->x;
uint64_t i = call_frame->thread->x + call_frame->thread->y * ystep +
call_frame->thread->z * zstep;
*(out_ptr + i) = *(lhs_ptr + i) + *(rhs_ptr + i);
return nullptr;
}
static const char* llvm_kernel_add = R"(
%SE_HOST_KernelCallFrame = type { ptr, ptr, i64, ptr }
%struct.SE_HOST_KernelArg = type { ptr, i64 }
define ptr @LlvmAddI32(ptr noundef %0) {
%2 = getelementptr inbounds %SE_HOST_KernelCallFrame, ptr %0, i32 0, i32 3
%3 = load ptr, ptr %2, align 8
%4 = getelementptr inbounds %struct.SE_HOST_KernelArg, ptr %3, i64 1
%5 = getelementptr inbounds %struct.SE_HOST_KernelArg, ptr %3, i64 2
%6 = load ptr, ptr %3, align 8
%7 = load ptr, ptr %4, align 8
%8 = load ptr, ptr %5, align 8
%9 = getelementptr inbounds %SE_HOST_KernelCallFrame, ptr %0, i32 0, i32 1
%10 = load ptr, ptr %9, align 8
%11 = load i64, ptr %10, align 8
%12 = getelementptr inbounds i32, ptr %6, i64 %11
%13 = load i32, ptr %12, align 4
%14 = getelementptr inbounds i32, ptr %7, i64 %11
%15 = load i32, ptr %14, align 4
%16 = add nsw i32 %13, %15
%17 = getelementptr inbounds i32, ptr %8, i64 %11
store i32 %16, ptr %17, align 4
ret ptr null
}
)";
static absl::StatusOr<std::unique_ptr<StreamExecutor>> NewStreamExecutor() {
StreamExecutorConfig config(0);
TF_ASSIGN_OR_RETURN(auto platform, PlatformManager::PlatformWithName("Host"));
TF_ASSIGN_OR_RETURN(auto stream_exec, platform->GetUncachedExecutor(config));
return stream_exec;
}
TEST(HostKernelTest, InternalAddition1D) {
auto tp = std::make_shared<tsl::thread::ThreadPool>(tsl::Env::Default(),
"XLAEigen", 2);
HostKernel kernel(3, AddI32, tp);
std::vector<int32_t> lhs = {1, 2, 3, 4};
std::vector<int32_t> rhs = {5, 6, 7, 8};
std::vector<int32_t> out = {0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
TF_ASSERT_OK(kernel.Launch(ThreadDim(4), args));
std::vector<int32_t> expected = {6, 8, 10, 12};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, InternalAddition3D) {
auto tp = std::make_shared<tsl::thread::ThreadPool>(tsl::Env::Default(),
"XLAEigen", 2);
HostKernel kernel(3, AddI32, tp);
std::vector<int32_t> lhs = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
std::vector<int32_t> rhs = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21};
std::vector<int32_t> out = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
TF_ASSERT_OK(kernel.Launch(ThreadDim(2, 2, 3), args));
std::vector<int32_t> expected = {11, 13, 15, 17, 19, 21,
23, 25, 27, 29, 31, 33};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, Addition3D) {
std::vector<int32_t> lhs = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
std::vector<int32_t> rhs = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21};
std::vector<int32_t> out = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(reinterpret_cast<void*>(AddI32), "Addition_kernel");
TF_ASSERT_OK_AND_ASSIGN(auto executor, NewStreamExecutor());
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto add,
KernelFactory::Create(executor.get(), spec));
const KernelArgsDeviceMemoryArray kargs{args, 0};
TF_ASSERT_OK(stream->Launch(ThreadDim(2, 2, 3), BlockDim(1), *add, kargs));
std::vector<int32_t> expected = {11, 13, 15, 17, 19, 21,
23, 25, 27, 29, 31, 33};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, JitAddition) {
std::vector<int32_t> lhs = {1, 2, 3, 4};
std::vector<int32_t> rhs = {5, 6, 7, 8};
std::vector<int32_t> out = {0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
MultiKernelLoaderSpec spec(3);
spec.AddLlvmHostKernel(llvm_kernel_add, "LlvmAddI32", "LlvmAddI32",
absl::Span<std::string>());
TF_ASSERT_OK_AND_ASSIGN(auto executor, NewStreamExecutor());
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto add,
KernelFactory::Create(executor.get(), spec));
const KernelArgsDeviceMemoryArray kargs{args, 0};
TF_ASSERT_OK(stream->Launch(ThreadDim(4), BlockDim(1), *add, kargs));
std::vector<int32_t> expected = {6, 8, 10, 12};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, LaunchAsync) {
auto* no_op = +[](const SE_HOST_KernelCallFrame*) {
return static_cast<SE_HOST_KernelError*>(nullptr);
};
auto thread_pool = std::make_shared<tsl::thread::ThreadPool>(
tsl::Env::Default(), "benchmark", tsl::port::MaxParallelism());
std::atomic<size_t> num_tasks = 0;
HostKernel::TaskRunner runner = [&](HostKernel::Task task) {
num_tasks.fetch_add(1, std::memory_order_relaxed);
thread_pool->Schedule(ToCopyableTask(std::move(task)));
};
HostKernel host_kernel(0, no_op);
auto event = host_kernel.Launch(ThreadDim(4, 4, 4),
absl::Span<const SE_HOST_KernelArg>(),
std::move(runner));
tsl::BlockUntilReady(event);
EXPECT_TRUE(event.IsConcrete());
EXPECT_EQ(num_tasks.load(std::memory_order_relaxed), 4 * 4 * 4 - 1);
}
TEST(HostKernelTest, LaunchAsyncError) {
auto* maybe_error = +[](const SE_HOST_KernelCallFrame* call_frame) {
if (call_frame->thread->x == 2 && call_frame->thread->z == 2) {
return reinterpret_cast<SE_HOST_KernelError*>(0xDEADBEEF);
}
return static_cast<SE_HOST_KernelError*>(nullptr);
};
auto thread_pool = std::make_shared<tsl::thread::ThreadPool>(
tsl::Env::Default(), "benchmark", tsl::port::MaxParallelism());
std::atomic<size_t> num_tasks = 0;
HostKernel::TaskRunner runner = [&](HostKernel::Task task) {
num_tasks.fetch_add(1, std::memory_order_relaxed);
thread_pool->Schedule(ToCopyableTask(std::move(task)));
};
HostKernel host_kernel(0, maybe_error);
auto event = host_kernel.Launch(ThreadDim(4, 4, 4),
absl::Span<const SE_HOST_KernelArg>(),
std::move(runner));
tsl::BlockUntilReady(event);
ASSERT_TRUE(event.IsError());
EXPECT_TRUE(absl::StrContains(event.GetError().message(),
"Failed to call host kernel:"));
EXPECT_EQ(num_tasks.load(std::memory_order_relaxed), 4 * 4 * 4 - 1);
}
static SE_HOST_KernelError* NoOp(const SE_HOST_KernelCallFrame*) {
return nullptr;
}
static void BM_HostKernelSyncLaunch(benchmark::State& state) {
int32_t tdim_x = state.range(0);
HostKernel kernel(0, NoOp);
for (auto _ : state) {
benchmark::DoNotOptimize(kernel.Launch(
ThreadDim(tdim_x), absl::Span<const SE_HOST_KernelArg>()));
}
}
static void BM_HostKernelAsyncLaunch(benchmark::State& state) {
int32_t tdim_x = state.range(0);
auto thread_pool = std::make_shared<tsl::thread::ThreadPool>(
tsl::Env::Default(), "benchmark", tsl::port::MaxParallelism());
HostKernel kernel(0, NoOp);
for (auto _ : state) {
auto event =
kernel.Launch(ThreadDim(tdim_x), absl::Span<const SE_HOST_KernelArg>(),
[&](auto task) {
thread_pool->Schedule(ToCopyableTask(std::move(task)));
});
tsl::BlockUntilReady(event);
}
}
BENCHMARK(BM_HostKernelSyncLaunch)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(4)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64);
BENCHMARK(BM_HostKernelAsyncLaunch)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(4)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64);
} |
1,829 | cpp | tensorflow/tensorflow | host_stream | third_party/xla/xla/stream_executor/host/host_stream.cc | third_party/xla/xla/stream_executor/host/host_stream_test.cc | #ifndef XLA_STREAM_EXECUTOR_HOST_HOST_STREAM_H_
#define XLA_STREAM_EXECUTOR_HOST_HOST_STREAM_H_
#include <cstddef>
#include <memory>
#include <queue>
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_common.h"
#include "tsl/platform/env.h"
#include "tsl/platform/thread_annotations.h"
namespace stream_executor {
namespace host {
class HostStream : public StreamCommon {
public:
explicit HostStream(StreamExecutor* executor);
~HostStream() override;
bool EnqueueTaskWithStatus(absl::AnyInvocable<absl::Status() &&> task);
bool EnqueueTask(absl::AnyInvocable<void() &&> task);
absl::Status BlockUntilDone();
absl::Status WaitFor(Stream* other) override;
absl::Status WaitFor(Event* event) override;
absl::Status RecordEvent(Event* event) override;
absl::Status MemZero(DeviceMemoryBase* location, uint64_t size) override;
absl::Status Memset32(DeviceMemoryBase* location, uint32_t pattern,
uint64_t size) override;
absl::Status Memcpy(DeviceMemoryBase* gpu_dst, const void* host_src,
uint64_t size) override;
absl::Status Memcpy(DeviceMemoryBase* gpu_dst,
const DeviceMemoryBase& gpu_src, uint64_t size) override;
absl::Status Memcpy(void* host_dst, const DeviceMemoryBase& gpu_src,
uint64_t size) override;
private:
bool WorkAvailable() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void WorkLoop();
absl::Mutex mu_;
std::queue<absl::AnyInvocable<absl::Status() &&>> work_queue_
ABSL_GUARDED_BY(mu_);
std::unique_ptr<tsl::Thread> thread_;
absl::Status status_;
};
}
}
#endif
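// Usage sketch (illustrative, not part of the original header): tasks run in
// FIFO order on the stream's worker thread, and BlockUntilDone returns the
// first error recorded since the last synchronization (see the accompanying
// unit tests). `executor` is assumed to be a host StreamExecutor.
//
//   HostStream stream(executor);
//   stream.EnqueueTask([] { /* runs on the worker thread */ });
//   stream.EnqueueTaskWithStatus(
//       [] { return absl::InternalError("error!"); });
//   absl::Status status = stream.BlockUntilDone();  // holds "error!"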
#include "xla/stream_executor/host/host_stream.h"
#include <string.h>
#include <cfenv>
#include <cstdint>
#include <memory>
#include <queue>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/host/host_event.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_common.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/env.h"
#include "tsl/platform/setround.h"
namespace stream_executor {
namespace host {
HostStream::HostStream(StreamExecutor* executor)
: StreamCommon(executor),
thread_(tsl::Env::Default()->StartThread({}, "host_executor",
[this]() { WorkLoop(); })) {}
HostStream::~HostStream() {
{
absl::MutexLock lock(&mu_);
work_queue_.push(nullptr);
}
thread_.reset();
parent()->DeallocateStream(this);
}
absl::Status HostStream::Memcpy(DeviceMemoryBase* gpu_dst,
const DeviceMemoryBase& gpu_src,
uint64_t size) {
void* dst_mem = gpu_dst->opaque();
void* src_mem = const_cast<void*>(gpu_src.opaque());
EnqueueTask([src_mem, dst_mem, size]() { memcpy(dst_mem, src_mem, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memcpy(void* host_dst, const DeviceMemoryBase& gpu_src,
uint64_t size) {
void* src_mem = const_cast<void*>(gpu_src.opaque());
EnqueueTask([host_dst, src_mem, size]() { memcpy(host_dst, src_mem, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memcpy(DeviceMemoryBase* gpu_dst, const void* host_src,
uint64_t size) {
void* dst_mem = gpu_dst->opaque();
EnqueueTask([dst_mem, host_src, size]() { memcpy(dst_mem, host_src, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memset32(DeviceMemoryBase* location, uint32_t pattern,
uint64_t size) {
void* gpu_mem = location->opaque();
EnqueueTask([gpu_mem, size, pattern]() { memset(gpu_mem, pattern, size); });
return absl::OkStatus();
}
absl::Status HostStream::MemZero(DeviceMemoryBase* location, uint64_t size) {
void* gpu_mem = location->opaque();
EnqueueTask([gpu_mem, size]() { memset(gpu_mem, 0, size); });
return absl::OkStatus();
}
absl::Status HostStream::WaitFor(Stream* other) {
auto event = std::make_shared<absl::Notification>();
static_cast<HostStream*>(other)->EnqueueTask([event]() { event->Notify(); });
EnqueueTask([event]() { event->WaitForNotification(); });
return absl::OkStatus();
}
absl::Status HostStream::WaitFor(Event* event) {
std::shared_ptr<absl::Notification> notification =
static_cast<HostEvent*>(event)->notification();
EnqueueTask([notification]() { notification->WaitForNotification(); });
return absl::OkStatus();
}
bool HostStream::EnqueueTask(absl::AnyInvocable<void() &&> task) {
return EnqueueTaskWithStatus([task = std::move(task)]() mutable {
std::move(task)();
return absl::OkStatus();
});
}
absl::Status HostStream::RecordEvent(Event* event) {
std::shared_ptr<absl::Notification> notification =
static_cast<HostEvent*>(event)->notification();
EnqueueTask([notification]() {
CHECK(!notification->HasBeenNotified());
notification->Notify();
});
return absl::OkStatus();
}
bool HostStream::EnqueueTaskWithStatus(
absl::AnyInvocable<absl::Status() &&> task) {
CHECK(task != nullptr);
absl::MutexLock lock(&mu_);
work_queue_.push(std::move(task));
return true;
}
bool HostStream::WorkAvailable() { return !work_queue_.empty(); }
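// Worker-thread body: waits until the queue is non-empty, swaps the whole
// queue out under the lock, then runs the tasks with the lock released. A null
// task, pushed by the destructor, is the sentinel that terminates the loop.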
void HostStream::WorkLoop() {
tsl::port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
while (true) {
std::queue<absl::AnyInvocable<absl::Status() &&>> queue;
{
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &HostStream::WorkAvailable));
std::swap(queue, work_queue_);
}
while (!queue.empty()) {
absl::AnyInvocable<absl::Status() &&>& fn = queue.front();
if (!fn) {
return;
}
status_.Update(std::move(fn)());
queue.pop();
}
}
}
absl::Status HostStream::BlockUntilDone() {
absl::Notification done;
absl::Status status;
EnqueueTask([&done, &status, this]() {
status = status_;
status_ = absl::OkStatus();
done.Notify();
});
done.WaitForNotification();
return status;
}
}
} | #include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
TEST(HostStream, EnforcesFIFOOrder) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
absl::Mutex mu;
int expected = 0;
bool ok = true;
for (int i = 0; i < 2000; ++i) {
TF_ASSERT_OK(stream->DoHostCallback([i, &mu, &expected, &ok]() {
absl::MutexLock lock(&mu);
if (expected != i) {
ok = false;
}
++expected;
}));
}
TF_ASSERT_OK(stream->BlockHostUntilDone());
absl::MutexLock lock(&mu);
EXPECT_TRUE(ok);
}
TEST(HostStream, ReportsHostCallbackError) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error!"); }));
auto status = stream->BlockHostUntilDone();
ASSERT_EQ(status.code(), tsl::error::INTERNAL);
ASSERT_EQ(status.message(), "error!");
}
TEST(HostStream, ReportsFirstHostCallbackError) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error 1"); }));
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error 2"); }));
ASSERT_EQ(stream->BlockHostUntilDone().message(), "error 1");
} |
1,830 | cpp | tensorflow/tensorflow | c_api_conversions | third_party/xla/xla/stream_executor/tpu/c_api_conversions.cc | third_party/xla/xla/stream_executor/tpu/c_api_conversions_test.cc | #ifndef XLA_STREAM_EXECUTOR_TPU_C_API_CONVERSIONS_H_
#define XLA_STREAM_EXECUTOR_TPU_C_API_CONVERSIONS_H_
#include <array>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/tpu/c_api_decl.h"
#include "xla/xla_data.pb.h"
namespace ApiConverter {
absl::Span<const float> MakeSpan(const FloatList& src_list);
void CreateVector(absl::Span<const float> src, FloatList* dst);
void Destroy(FloatList* float_list);
absl::Span<const int64_t> MakeSpan(const Int64List& src_list);
void CreateVector(absl::Span<const int64_t> src, Int64List* dst);
absl::Span<const int> MakeSpan(const IntList& src_list);
void CreateVector(absl::Span<const int> src, IntList* dst);
absl::Span<const bool> MakeSpan(const BoolList& src_list);
void CreateVector(absl::Span<const bool> src, BoolList* dst);
void CreateVector(absl::Span<const xla::DimLevelType> src, IntList* dst);
SE_DeviceMemoryBase ToC(const stream_executor::DeviceMemoryBase& base);
void ToC(const stream_executor::DeviceMemoryBase& base,
SE_DeviceMemoryBase* se_base);
stream_executor::DeviceMemoryBase FromC(const SE_DeviceMemoryBase& se_base);
void Destroy(SE_DeviceMemoryBase*);
xla::Tile FromC(const XLA_Tile* c_tile);
void ToC(const xla::Tile& xla_tile, XLA_Tile* c_tile);
void Destroy(XLA_Tile* c_tile);
xla::Layout FromC(const XLA_Layout* c_layout);
void ToC(const xla::Layout& xla_layout, XLA_Layout* c_layout);
void Destroy(XLA_Layout* c_layout);
xla::Shape FromC(const XLA_Shape* c_shape);
void ToC(const xla::Shape& xla_shape, XLA_Shape* c_shape);
void Destroy(XLA_Shape* c_shape);
XLA_ShapeIndex ToC(const xla::ShapeIndex& xla_shape);
xla::ShapeIndex FromC(XLA_ShapeIndex* c_shape);
void Destroy(XLA_ShapeIndex*);
void ToC(const xla::LiteralSlice& literal, XLA_Literal* c_literal);
xla::MutableBorrowingLiteral FromC(XLA_Literal* c_literal);
void Destroy(XLA_Literal* c_literal);
void ToC(const xla::ShapedBuffer& buffer, XLA_ShapedBuffer* c_device_buffer);
xla::ShapedBuffer FromC(XLA_ShapedBuffer* c_buffer);
void Destroy(XLA_ShapedBuffer* c_buffer);
SE_DeviceMemoryBase ToC(const stream_executor::DeviceMemoryBase& base);
stream_executor::DeviceMemoryBase FromC(const SE_DeviceMemoryBase& se_base);
void Destroy(SE_DeviceMemoryBase*);
void ToC(const xla::LiteralSlice& literal, XLA_Literal* c_literal);
xla::MutableBorrowingLiteral FromC(XLA_Literal* c_literal);
void Destroy(XLA_Literal* c_literal);
void ToC(const xla::ShapedBuffer& buffer, XLA_ShapedBuffer* c_device_buffer);
xla::ShapedBuffer FromC(XLA_ShapedBuffer* c_buffer);
void Destroy(XLA_ShapedBuffer* c_buffer);
struct TpuEmbeddingEngineParametersData {
std::array<std::vector<FloatListRef*>, 8> vectors;
TpuEmbeddingEngineParameters c_params;
};
std::unique_ptr<TpuEmbeddingEngineParametersData> Create(int num_tables);
xla::MaybeOwningDeviceMemory FromC(
SE_MaybeOwningDeviceMemory* se_mem,
stream_executor::DeviceMemoryAllocator* allocator);
SE_DeviceMemoryAllocator ToC(stream_executor::DeviceMemoryAllocator* allocator);
stream_executor::DeviceMemoryAllocator* FromC(
const SE_DeviceMemoryAllocator& c_allocator);
SE_MaybeOwningDeviceMemory ToC(stream_executor::OwningDeviceMemory* mem);
SE_MaybeOwningDeviceMemory ToC(xla::MaybeOwningDeviceMemory& mem, bool aliased);
XLA_HloModule ToC(const xla::HloModule& module);
absl::StatusOr<std::unique_ptr<xla::HloModule>> FromC(
const XLA_HloModule& c_module);
void Destroy(XLA_HloModule* c_module);
XLA_HloModuleConfig ToC(const xla::HloModuleConfig& config);
xla::HloModuleConfig FromC(const XLA_HloModuleConfig& c_config);
void Destroy(XLA_HloModuleConfig* c_config);
template <class CType>
struct StackHelper {
explicit StackHelper() {}
template <class CppType>
explicit StackHelper(const CppType& t) {
::ApiConverter::ToC(t, &value);
}
~StackHelper() { ::ApiConverter::Destroy(&value); }
template <class CppType>
CppType AsCpp() const {
return ::ApiConverter::FromC(&value);
}
mutable CType value;
};
}
#endif
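// Usage sketch (illustrative, not part of the original header): StackHelper
// owns the C struct for the duration of a scope, calling ToC on construction
// and Destroy on destruction, so a round trip across the C API boundary is:
//
//   xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 3});
//   ApiConverter::StackHelper<XLA_Shape> c_shape(shape);      // ToC
//   xla::Shape round_tripped = c_shape.AsCpp<xla::Shape>();   // FromC
//   // Destroy(&c_shape.value) runs automatically when c_shape leaves scope.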
#include "xla/stream_executor/tpu/c_api_conversions.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/service/computation_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/tpu/c_api_decl.h"
#include "xla/stream_executor/tpu/c_api_defn.h"
#include "xla/stream_executor/tpu/proto_helper.h"
#include "xla/stream_executor/tpu/tpu_api.h"
#include "xla/stream_executor/tpu/tpu_executor_api.h"
#include "xla/stream_executor/tpu/tpu_executor_c_api.h"
#include "xla/stream_executor/tpu/tpu_ops_c_api.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace ApiConverter {
template <typename Src, typename Dst, typename DstList>
static void CreateVectorBase(const absl::Span<Src> src, DstList* dst) {
dst->size = src.size();
if (dst->size > TPU_C_API_MAX_INLINED) {
dst->heap = new Dst[dst->size];
std::copy(src.begin(), src.end(), dst->heap);
} else {
std::copy(src.begin(), src.end(), dst->inlined);
}
}
void CreateVector(const absl::Span<const int> src, IntList* dst) {
return CreateVectorBase<const int, int, IntList>(src, dst);
}
void CreateVector(const absl::Span<const int64_t> src, Int64List* dst) {
return CreateVectorBase<const int64_t, int64_t, Int64List>(src, dst);
}
void CreateVector(const absl::Span<const float> src, FloatList* dst) {
return CreateVectorBase<const float, float, FloatList>(src, dst);
}
void CreateVector(const absl::Span<const bool> src, BoolList* dst) {
return CreateVectorBase<const bool, bool, BoolList>(src, dst);
}
void CreateVector(const absl::Span<const xla::DimLevelType> src, IntList* dst) {
CreateVectorBase<const xla::DimLevelType, int, IntList>(src, dst);
}
static void CreateVector(const absl::Span<const bool> src, IntList* dst) {
CreateVectorBase<const bool, int, IntList>(src, dst);
}
static void CreateVector(const absl::Span<const xla::Tile> src, TileList* dst) {
dst->size = src.size();
XLA_Tile* c_tiles;
if (dst->size > TPU_C_API_MAX_INLINED) {
dst->heap = new XLA_Tile[dst->size];
c_tiles = dst->heap;
} else {
c_tiles = dst->inlined;
}
for (int i = 0; i < dst->size; ++i) {
ToC(src[i], &c_tiles[i]);
}
}
template <typename Dst, typename Src, typename SrcList>
static absl::Span<const Dst> MakeSpanBase(const SrcList& src_list) {
static_assert(sizeof(Src) == sizeof(Dst), "Mismatched types");
const Src* src = src_list.size > TPU_C_API_MAX_INLINED ? src_list.heap
: &src_list.inlined[0];
return absl::Span<const Dst>(reinterpret_cast<const Dst*>(src),
src_list.size);
}
absl::Span<const int> MakeSpan(const IntList& src_list) {
return MakeSpanBase<int, int, IntList>(src_list);
}
absl::Span<const int64_t> MakeSpan(const Int64List& src_list) {
return MakeSpanBase<int64_t, int64_t, Int64List>(src_list);
}
absl::Span<const float> MakeSpan(const FloatList& src_list) {
return MakeSpanBase<float, float, FloatList>(src_list);
}
absl::Span<const bool> MakeSpan(const BoolList& src_list) {
return MakeSpanBase<bool, bool, BoolList>(src_list);
}
xla::ShapedBuffer FromC(XLA_ShapedBuffer* c_buffer) {
xla::Shape xla_on_device_shape =
ApiConverter::FromC(&c_buffer->on_device_shape);
xla::ShapeTree<stream_executor::DeviceMemoryBase> xla_shape_tree(
xla_on_device_shape);
size_t i = 0;
for (auto& pair : xla_shape_tree) {
pair.second = ApiConverter::FromC(c_buffer->bases[i]);
i++;
}
xla::ShapedBuffer xla_shaped_buffer(xla_on_device_shape,
c_buffer->device_ordinal);
xla_shaped_buffer.set_buffers(xla_shape_tree);
return xla_shaped_buffer;
}
SE_MaybeOwningDeviceMemory ToC(xla::MaybeOwningDeviceMemory& mem,
bool aliased) {
SE_MaybeOwningDeviceMemory se_mem;
se_mem.owned = mem.HasOwnership();
se_mem.memory = ApiConverter::ToC(mem.AsDeviceMemoryBase());
if (mem.HasOwnership()) {
const stream_executor::OwningDeviceMemory* owned =
mem.AsOwningDeviceMemory();
se_mem.device_ordinal = owned->device_ordinal();
se_mem.allocator = ApiConverter::ToC(owned->allocator());
if (!aliased) {
mem.Release()->Release();
}
} else {
se_mem.allocator =
ToC(static_cast<stream_executor::DeviceMemoryAllocator*>(nullptr));
se_mem.device_ordinal = -1;
}
return se_mem;
}
xla::MaybeOwningDeviceMemory FromC(
SE_MaybeOwningDeviceMemory* se_mem,
stream_executor::DeviceMemoryAllocator* allocator) {
if (se_mem->owned) {
return xla::MaybeOwningDeviceMemory(
stream_executor::OwningDeviceMemory(ApiConverter::FromC(se_mem->memory),
se_mem->device_ordinal, allocator));
} else {
return xla::MaybeOwningDeviceMemory(ApiConverter::FromC(se_mem->memory));
}
}
SE_DeviceMemoryAllocator ToC(
stream_executor::DeviceMemoryAllocator* allocator) {
SE_DeviceMemoryAllocator se_allocator;
if (allocator == nullptr) {
se_allocator.ctx = nullptr;
se_allocator.platform = nullptr;
se_allocator.allocate = nullptr;
se_allocator.deallocate = nullptr;
return se_allocator;
}
se_allocator.platform = nullptr;
se_allocator.ctx = allocator;
se_allocator.allocate = [](void* ctx, int device_ordinal, uint64_t size,
bool retry_on_failure, int64_t memory_space,
SE_ScopedDeviceMemory* memory,
TF_Status* se_status) {
auto allocation =
reinterpret_cast<stream_executor::DeviceMemoryAllocator*>(ctx)
->Allocate(device_ordinal, size, retry_on_failure, memory_space);
if (!allocation.ok()) {
auto status = allocation.status();
auto message = status.message();
stream_executor::tpu::ExecutorApiFn()->TpuStatus_SetFn(
se_status, status.raw_code(), message.data(), message.size());
} else {
auto& scoped_memory = allocation.value();
memory->wrapped = ApiConverter::ToC(scoped_memory.Release());
memory->device_ordinal = scoped_memory.device_ordinal();
}
};
se_allocator.deallocate = [](void* ctx, SE_DeviceMemoryBase* base,
int device_ordinal, TF_Status* se_status) {
auto status = reinterpret_cast<stream_executor::DeviceMemoryAllocator*>(ctx)
->Deallocate(device_ordinal, ApiConverter::FromC(*base));
if (!status.ok()) {
auto message = status.message();
stream_executor::tpu::ExecutorApiFn()->TpuStatus_SetFn(
se_status, status.raw_code(), message.data(), message.size());
}
};
return se_allocator;
}
stream_executor::DeviceMemoryAllocator* FromC(
const SE_DeviceMemoryAllocator& c_allocator) {
return reinterpret_cast<stream_executor::DeviceMemoryAllocator*>(
c_allocator.ctx);
}
SE_MaybeOwningDeviceMemory ToC(stream_executor::OwningDeviceMemory* mem) {
SE_MaybeOwningDeviceMemory se_mem;
se_mem.device_ordinal = mem->device_ordinal();
se_mem.memory = ApiConverter::ToC(mem->Release());
se_mem.allocator = ApiConverter::ToC(mem->allocator());
se_mem.owned = true;
return se_mem;
}
void ToC(const stream_executor::DeviceMemoryBase& base,
SE_DeviceMemoryBase* se_base) {
se_base->opaque = const_cast<void*>(base.opaque());
se_base->payload = base.payload();
se_base->size = base.size();
}
SE_DeviceMemoryBase ToC(const stream_executor::DeviceMemoryBase& base) {
SE_DeviceMemoryBase se_base;
ToC(base, &se_base);
return se_base;
}
stream_executor::DeviceMemoryBase FromC(const SE_DeviceMemoryBase& se_base) {
stream_executor::DeviceMemoryBase base(se_base.opaque, se_base.size);
base.SetPayload(se_base.payload);
return base;
}
void ToC(const xla::Shape& xla_shape, XLA_Shape* c_shape) {
c_shape->element_type = xla_shape.element_type();
CreateVector(xla_shape.dimensions(), &c_shape->dimensions);
CreateVector(xla_shape.dynamic_dimensions(), &c_shape->dynamic_dimensions);
c_shape->ntuple_shapes = xla_shape.tuple_shapes_size();
if (c_shape->ntuple_shapes > 0) {
c_shape->tuple_shapes = new XLA_Shape[c_shape->ntuple_shapes];
for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
ToC(xla_shape.tuple_shapes(i), &c_shape->tuple_shapes[i]);
}
}
if (xla_shape.has_layout()) {
c_shape->has_layout = true;
ToC(xla_shape.layout(), &c_shape->layout);
} else {
c_shape->has_layout = false;
}
}
xla::Shape FromC(const XLA_Shape* c_shape) {
absl::Span<const int64_t> dims = MakeSpan(c_shape->dimensions);
absl::Span<const bool> dynamic_dims = MakeSpan(c_shape->dynamic_dimensions);
std::vector<xla::Shape> tuple_shapes;
tuple_shapes.reserve(c_shape->ntuple_shapes);
for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
tuple_shapes.push_back(FromC(&c_shape->tuple_shapes[i]));
}
xla::Shape result(static_cast<xla::PrimitiveType>(c_shape->element_type),
dims, dynamic_dims, std::move(tuple_shapes));
if (c_shape->has_layout) {
*result.mutable_layout() = FromC(&c_shape->layout);
}
return result;
}
void Destroy(XLA_Shape* c_shape) {
if (c_shape->dimensions.size > TPU_C_API_MAX_INLINED) {
delete[] c_shape->dimensions.heap;
}
if (c_shape->dynamic_dimensions.size > TPU_C_API_MAX_INLINED) {
delete[] c_shape->dynamic_dimensions.heap;
}
if (c_shape->ntuple_shapes > 0) {
for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
Destroy(&c_shape->tuple_shapes[i]);
}
delete[] c_shape->tuple_shapes;
}
if (c_shape->has_layout) {
Destroy(&c_shape->layout);
}
}
void ToC(const xla::Layout& layout, XLA_Layout* c_layout) {
CreateVector(layout.minor_to_major(), &c_layout->minor_to_major);
{
const int n = layout.dim_level_types_size();
absl::InlinedVector<xla::DimLevelType, xla::InlineRank()> dim_level_types(
n);
for (int i = 0; i < n; i++) {
dim_level_types[i] = layout.dim_level_type(i);
}
CreateVector(dim_level_types, &c_layout->dim_level_types);
}
{
const int n = layout.dim_unique_size();
absl::InlinedVector<bool, xla::InlineRank()> dim_unique(n);
for (int i = 0; i < n; i++) {
dim_unique[i] = layout.dim_unique(i);
}
CreateVector(dim_unique, &c_layout->dim_unique);
}
{
const int n = layout.dim_ordered_size();
absl::InlinedVector<bool, xla::InlineRank()> dim_ordered(n);
for (int i = 0; i < n; i++) {
dim_ordered[i] = layout.dim_ordered(i);
}
CreateVector(dim_ordered, &c_layout->dim_ordered);
}
c_layout->index_primitive_type = layout.index_primitive_type();
c_layout->pointer_primitive_type = layout.pointer_primitive_type();
c_layout->element_size_in_bits = layout.element_size_in_bits();
c_layout->memory_space = layout.memory_space();
c_layout->dynamic_shape_metadata_prefix_bytes =
layout.dynamic_shape_metadata_prefix_bytes();
CreateVector(layout.tiles(), &c_layout->tiles);
c_layout->tail_padding_alignment_in_elements =
layout.tail_padding_alignment_in_elements();
}
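// Rebuilds an xla::Layout from the C struct; fields that have no C
// representation are passed to the constructor as defaults.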
xla::Layout FromC(const XLA_Layout* c_layout) {
absl::Span<const int64_t> minor_to_major = MakeSpan(c_layout->minor_to_major);
absl::Span<const int> dim_level_type_ints =
MakeSpan(c_layout->dim_level_types);
xla::DimLevelTypeVector dim_level_types;
dim_level_types.reserve(dim_level_type_ints.size());
for (int dim_level_type : dim_level_type_ints) {
dim_level_types.push_back(static_cast<xla::DimLevelType>(dim_level_type));
}
absl::Span<const int> dim_unique_ints = MakeSpan(c_layout->dim_unique);
absl::InlinedVector<bool, xla::InlineRank()> dim_unique(
dim_unique_ints.begin(), dim_unique_ints.end());
  absl::Span<const int> dim_ordered_ints = MakeSpan(c_layout->dim_ordered);
absl::InlinedVector<bool, xla::InlineRank()> dim_ordered(
dim_ordered_ints.begin(), dim_ordered_ints.end());
absl::InlinedVector<xla::Tile, 1> tiles;
const XLA_Tile* c_tiles = c_layout->tiles.size > TPU_C_API_MAX_INLINED
? c_layout->tiles.heap
: c_layout->tiles.inlined;
tiles.reserve(c_layout->tiles.size);
for (int i = 0; i < c_layout->tiles.size; ++i) {
tiles.push_back(FromC(&c_tiles[i]));
}
return xla::Layout(
minor_to_major, dim_level_types, dim_unique, dim_ordered, tiles,
c_layout->tail_padding_alignment_in_elements,
static_cast<xla::PrimitiveType>(c_layout->index_primitive_type),
static_cast<xla::PrimitiveType>(c_layout->pointer_primitive_type),
c_layout->element_size_in_bits, c_layout->memory_space,
{},
nullptr,
c_layout->dynamic_shape_metadata_prefix_bytes);
}
void Destroy(XLA_Layout* c_layout) {
if (c_layout->minor_to_major.size > TPU_C_API_MAX_INLINED) {
delete[] c_layout->minor_to_major.heap;
}
if (c_layout->dim_level_types.size > TPU_C_API_MAX_INLINED) {
delete[] c_layout->dim_level_types.heap;
  }
  if (c_layout->dim_unique.size > TPU_C_API_MAX_INLINED) {
    delete[] c_layout->dim_unique.heap;
  }
  if (c_layout->dim_ordered.size > TPU_C_API_MAX_INLINED) {
    delete[] c_layout->dim_ordered.heap;
  }
if (c_layout->tiles.size > TPU_C_API_MAX_INLINED) {
delete[] c_layout->tiles.heap;
}
}
void ToC(const xla::Tile& tile, XLA_Tile* c_tile) {
CreateVector(tile.dimensions(), &c_tile->dimensions);
}
xla::Tile FromC(const XLA_Tile* c_tile) {
absl::Span<const int64_t> dims = MakeSpan(c_tile->dimensions);
return xla::Tile(dims);
}
void Destroy(XLA_Tile* c_tile) {
if (c_tile->dimensions.size > TPU_C_API_MAX_INLINED) {
delete[] c_tile->dimensions.heap;
}
}
XLA_ShapeIndex ToC(const xla::ShapeIndex& xla_shape) {
XLA_ShapeIndex c_shape;
CHECK_LT(xla_shape.size(), 8);
c_shape.count = xla_shape.size();
for (int i = 0; i < xla_shape.size(); ++i) {
c_shape.indices[i] = xla_shape[i];
}
return c_shape;
}
xla::ShapeIndex FromC(XLA_ShapeIndex* c_shape) {
return xla::ShapeIndex(c_shape->indices, c_shape->indices + c_shape->count);
}
void ToC(const xla::LiteralSlice& literal, XLA_Literal* c_literal) {
ApiConverter::ToC(literal.shape(), &c_literal->shape);
auto shapes = xla::ShapeUtil::GetLeafShapes(literal.shape());
c_literal->buffers = new char*[shapes.size()];
c_literal->sizes = new size_t[shapes.size()];
c_literal->count = shapes.size();
for (int i = 0; i < shapes.size(); ++i) {
c_literal->buffers[i] = reinterpret_cast<char*>(
const_cast<void*>(literal.untyped_data(shapes[i].index)));
c_literal->sizes[i] = literal.size_bytes(shapes[i].index);
}
}
xla::MutableBorrowingLiteral FromC(XLA_Literal* c_literal) {
xla::Shape shape = ApiConverter::FromC(&c_literal->shape);
return xla::MutableBorrowingLiteral(
absl::MakeSpan(c_literal->buffers, c_literal->count), shape);
}
void ToC(const xla::ShapedBuffer& buffer, XLA_ShapedBuffer* c_device_buffer) {
ApiConverter::ToC(buffer.on_device_shape(),
&c_device_buffer->on_device_shape);
c_device_buffer->device_ordinal = buffer.device_ordinal();
absl::InlinedVector<SE_DeviceMemoryBase, 2> bases;
for (auto& pair : buffer.buffers()) {
bases.push_back(ApiConverter::ToC(pair.second));
}
c_device_buffer->count = bases.size();
c_device_buffer->bases = new SE_DeviceMemoryBase[bases.size()];
for (int i = 0; i < bases.size(); ++i) {
c_device_buffer->bases[i] = bases[i];
}
}
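// Allocates storage for the embedding engine parameter tables; each of the
// eight parameter slots in c_params aliases the corresponding owned vector,
// so the pointers stay valid while the returned object is alive.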
std::unique_ptr<TpuEmbeddingEngineParametersData> Create(int num_tables) {
auto data = std::make_unique<TpuEmbeddingEngineParametersData>();
data->c_params.num_tables = num_tables;
for (int i = 0; i < 8; i++) {
data->vectors[i].resize(num_tables);
data->c_params.parameters[i] = data->vectors[i].data();
}
return data;
}
void Destroy(XLA_ShapeIndex* shape_index) { delete[] shape_index; }
void Destroy(SE_DeviceMemoryBase*) {}
void Destroy(XLA_Literal* c_literal) {
delete[] c_literal->buffers;
delete[] c_literal->sizes;
ApiConverter::Destroy(&c_literal->shape);
}
void Destroy(XLA_ShapedBuffer* c_buffer) {
ApiConverter::Destroy(&c_buffer->on_device_shape);
delete[] c_buffer->bases;
}
XLA_HloModule ToC(const xla::HloModule& module) {
XLA_HloModule c_module;
c_module.proto = stream_executor::tpu::SerializeProto(module.ToProto());
c_module.module_config = ApiConverter::ToC(module.config());
return c_module;
}
absl::StatusOr<std::unique_ptr<xla::HloModule>> FromC(
const XLA_HloModule& c_module) {
xla::HloModuleProto module_proto =
stream_executor::tpu::DeserializeProto<xla::HloModuleProto>(
c_module.proto);
return xla::HloModule::CreateFromProto(
module_proto, ApiConverter::FromC(c_module.module_config));
}
void Destroy(XLA_HloModule* c_module) {
stream_executor::tpu::SerializedProto_Free(c_module->proto);
Destroy(&c_module->module_config);
}
static xla::HloModuleConfig ConfigWithLayout(
const XLA_HloModuleConfig& se_config) {
xla::ShapeLayout result_layout(
FromC(&se_config.entry_computation_layout.result_layout));
xla::ComputationLayout layout(result_layout);
for (int i = 0; i < se_config.entry_computation_layout.parameter_count; ++i) {
layout.add_parameter_layout(xla::ShapeLayout(
FromC(&se_config.entry_computation_layout.parameter_layouts[i])));
}
return xla::HloModuleConfig(layout);
}
XLA_HloModuleConfig ToC(const xla::HloModuleConfig& config) {
XLA_HloModuleConfig hlo_config;
hlo_config.seed = config.seed();
hlo_config.launch_id = config.launch_id();
hlo_config.replica_count = config.replica_count();
hlo_config.num_partitions = config.num_partitions();
hlo_config.use_spmd_partitioning = config.use_spmd_partitioning();
hlo_config.use_auto_spmd_partitioning = config.use_auto_spmd_partitioning();
CreateVector(config.allow_spmd_sharding_propagation_to_parameters(),
&hlo_config.allow_spmd_sharding_propagation_to_parameters);
CreateVector(config.allow_spmd_sharding_propagation_to_output(),
&hlo_config.allow_spmd_sharding_propagation_to_output);
CreateVector(config.auto_spmd_partitioning_mesh_shape(),
&hlo_config.auto_spmd_partitioning_mesh_shape);
CreateVector(config.auto_spmd_partitioning_mesh_ids(),
&hlo_config.auto_spmd_partitioning_mesh_ids);
hlo_config.has_static_device_assignment =
config.has_static_device_assignment();
hlo_config.has_entry_computation_layout =
config.has_entry_computation_layout();
if (config.has_static_device_assignment()) {
xla::DeviceAssignmentProto dev_proto;
config.static_device_assignment().Serialize(&dev_proto);
hlo_config.static_device_assignment =
stream_executor::tpu::SerializeProto(dev_proto);
}
hlo_config.debug_options =
stream_executor::tpu::SerializeProto(config.debug_options());
if (config.has_entry_computation_layout()) {
const auto& layout = config.entry_computation_layout();
ApiConverter::ToC(layout.result_layout().shape(),
&hlo_config.entry_computation_layout.result_layout);
hlo_config.entry_computation_layout.parameter_layouts =
new XLA_Shape[layout.parameter_count()];
for (int i = 0; i < layout.parameter_count(); ++i) {
ApiConverter::ToC(
layout.parameter_layout(i).shape(),
&hlo_config.entry_computation_layout.parameter_layouts[i]);
}
hlo_config.entry_computation_layout.parameter_count =
layout.parameter_count();
}
return hlo_config;
}
xla::HloModuleConfig FromC(const XLA_HloModuleConfig& c_config) {
xla::HloModuleConfig config = c_config.has_entry_computation_layout
? ConfigWithLayout(c_config)
: xla::HloModuleConfig();
config.set_launch_id(c_config.launch_id);
config.set_seed(c_config.seed);
config.set_replica_count(c_config.replica_count);
config.set_num_partitions(c_config.num_partitions);
config.set_use_spmd_partitioning(c_config.use_spmd_partitioning);
config.set_use_auto_spmd_partitioning(c_config.use_auto_spmd_partitioning);
config.set_allow_spmd_sharding_propagation_to_parameters(
MakeSpan(c_config.allow_spmd_sharding_propagation_to_parameters));
config.set_allow_spmd_sharding_propagation_to_output(
MakeSpan(c_config.allow_spmd_sharding_propagation_to_output));
absl::Span<const int64_t> mesh_shape_span =
MakeSpan(c_config.auto_spmd_partitioning_mesh_shape);
config.set_auto_spmd_partitioning_mesh_shape(
std::vector<int64_t>(mesh_shape_span.begin(), mesh_shape_span.end()));
absl::Span<const int64_t> mesh_ids_span =
MakeSpan(c_config.auto_spmd_partitioning_mesh_ids);
config.set_auto_spmd_partitioning_mesh_ids(
std::vector<int64_t>(mesh_ids_span.begin(), mesh_ids_span.end()));
if (c_config.has_static_device_assignment) {
auto device_assignment = xla::DeviceAssignment::Deserialize(
stream_executor::tpu::DeserializeProto<xla::DeviceAssignmentProto>(
c_config.static_device_assignment));
config.set_static_device_assignment(
*(std::move(device_assignment).value()));
}
config.set_debug_options(
stream_executor::tpu::DeserializeProto<xla::DebugOptions>(
c_config.debug_options));
return config;
}
void Destroy(XLA_HloModuleConfig* c_config) {
  if (c_config->has_entry_computation_layout) {
    for (auto i = 0; i < c_config->entry_computation_layout.parameter_count;
         ++i) {
      ApiConverter::Destroy(
          &c_config->entry_computation_layout.parameter_layouts[i]);
    }
    delete[] c_config->entry_computation_layout.parameter_layouts;
    ApiConverter::Destroy(&c_config->entry_computation_layout.result_layout);
  }
if (c_config->has_static_device_assignment) {
stream_executor::tpu::SerializedProto_Free(
c_config->static_device_assignment);
}
stream_executor::tpu::SerializedProto_Free(c_config->debug_options);
}
void Destroy(FloatList* float_list) {
if (float_list->size > TPU_C_API_MAX_INLINED) {
delete[] float_list->heap;
}
}
} | #include "xla/stream_executor/tpu/c_api_conversions.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/tpu/c_api_decl.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace ApiConverter {
namespace {
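// Minimal tuple-producing module used to round-trip HloModule and
// HloModuleConfig through the C API in the tests below.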
constexpr absl::string_view kHloString =
R"(
HloModule TupleCreate_module:
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)";
TEST(XlaTile, ToCInlined) {
std::vector<int64_t> tile_dimensions{2, 3, 4, 5};
xla::Tile cpp_tile(tile_dimensions);
XLA_Tile c_tile;
ToC(cpp_tile, &c_tile);
absl::Span<const int64_t> cpp_tile_dimensions = cpp_tile.dimensions();
ASSERT_EQ(cpp_tile_dimensions, tile_dimensions);
absl::Span<const int64_t> c_tile_dimensions = MakeSpan(c_tile.dimensions);
EXPECT_EQ(cpp_tile_dimensions, c_tile_dimensions);
Destroy(&c_tile);
}
TEST(XlaTile, ToCDynamic) {
std::vector<int64_t> tile_dimensions{2, 3, 4, 5, 6, 7, 8, 9};
xla::Tile cpp_tile(tile_dimensions);
XLA_Tile c_tile;
ToC(cpp_tile, &c_tile);
absl::Span<const int64_t> cpp_tile_dimensions = cpp_tile.dimensions();
ASSERT_EQ(cpp_tile_dimensions, tile_dimensions);
absl::Span<const int64_t> c_tile_dimensions = MakeSpan(c_tile.dimensions);
EXPECT_EQ(cpp_tile_dimensions, c_tile_dimensions);
Destroy(&c_tile);
}
TEST(XlaTile, FromCInlined) {
constexpr size_t kInlinedSize = 4;
Int64List tile_dimensions;
tile_dimensions.size = kInlinedSize;
for (int i = 0; i < kInlinedSize; ++i) {
tile_dimensions.inlined[i] = i + 2;
}
XLA_Tile c_tile{tile_dimensions};
xla::Tile cpp_tile = FromC(&c_tile);
auto cpp_dimensions = cpp_tile.dimensions();
EXPECT_EQ(cpp_dimensions.size(), kInlinedSize);
for (int i = 0; i < kInlinedSize; ++i) {
EXPECT_EQ(cpp_dimensions[i], i + 2);
}
Destroy(&c_tile);
}
TEST(XlaTile, FromCDynamic) {
constexpr size_t kDynamicSize = 8;
int64_t* dynamic = new int64_t[kDynamicSize];
for (int i = 0; i < kDynamicSize; ++i) {
dynamic[i] = i + 2;
}
Int64List tile_dimensions;
tile_dimensions.size = kDynamicSize;
tile_dimensions.heap = dynamic;
XLA_Tile c_tile{tile_dimensions};
xla::Tile cpp_tile = FromC(&c_tile);
auto cpp_dimensions = cpp_tile.dimensions();
EXPECT_EQ(cpp_dimensions.size(), kDynamicSize);
for (int i = 0; i < kDynamicSize; ++i) {
EXPECT_EQ(cpp_dimensions[i], i + 2);
}
Destroy(&c_tile);
}
namespace TestImpl {
void XlaLayout_ToC(const xla::Layout& cpp_layout) {
XLA_Layout c_layout;
ToC(cpp_layout, &c_layout);
absl::Span<const int64_t> cpp_minor_to_major = cpp_layout.minor_to_major();
absl::Span<const int64_t> c_minor_to_major =
MakeSpan(c_layout.minor_to_major);
EXPECT_EQ(cpp_minor_to_major, c_minor_to_major);
absl::Span<const int> c_dim_level_types = MakeSpan(c_layout.dim_level_types);
EXPECT_EQ(cpp_layout.dim_level_types_size(), c_dim_level_types.size());
for (int i = 0; i < c_dim_level_types.size(); ++i) {
EXPECT_EQ(static_cast<int>(cpp_layout.dim_level_type(i)),
c_dim_level_types[i]);
}
absl::Span<const int> c_dim_unique = MakeSpan(c_layout.dim_unique);
EXPECT_EQ(cpp_layout.dim_unique_size(), c_dim_unique.size());
for (int i = 0; i < c_dim_unique.size(); ++i) {
EXPECT_EQ(cpp_layout.dim_unique(i), static_cast<bool>(c_dim_unique[i]));
}
absl::Span<const int> c_dim_ordered = MakeSpan(c_layout.dim_ordered);
EXPECT_EQ(cpp_layout.dim_ordered_size(), c_dim_ordered.size());
for (int i = 0; i < c_dim_ordered.size(); ++i) {
EXPECT_EQ(cpp_layout.dim_ordered(i), static_cast<bool>(c_dim_ordered[i]));
}
absl::Span<const xla::Tile> cpp_tiles = cpp_layout.tiles();
TileList c_tiles = c_layout.tiles;
EXPECT_EQ(cpp_tiles.size(), c_tiles.size);
XLA_Tile* tile_base =
(c_tiles.size > TPU_C_API_MAX_INLINED) ? c_tiles.heap : c_tiles.inlined;
for (int i = 0; i < c_tiles.size; ++i) {
xla::Tile converted_c_tile = FromC(&tile_base[i]);
EXPECT_EQ(cpp_tiles[i], converted_c_tile);
}
EXPECT_EQ(cpp_layout.index_primitive_type(), c_layout.index_primitive_type);
EXPECT_EQ(cpp_layout.pointer_primitive_type(),
c_layout.pointer_primitive_type);
EXPECT_EQ(cpp_layout.element_size_in_bits(), c_layout.element_size_in_bits);
EXPECT_EQ(cpp_layout.memory_space(), c_layout.memory_space);
EXPECT_EQ(cpp_layout.dynamic_shape_metadata_prefix_bytes(),
c_layout.dynamic_shape_metadata_prefix_bytes);
Destroy(&c_layout);
}
}
TEST(XlaLayout, ToCScalar) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
xla::Layout cpp_layout = cpp_shape.layout();
TestImpl::XlaLayout_ToC(cpp_layout);
}
TEST(XlaLayout, ToCNested) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
xla::Layout cpp_layout = cpp_shape.layout();
TestImpl::XlaLayout_ToC(cpp_layout);
}
TEST(XlaLayout, FromCScalar) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
xla::Layout in_layout = cpp_shape.layout();
XLA_Layout c_layout;
ToC(in_layout, &c_layout);
xla::Layout out_layout = FromC(&c_layout);
EXPECT_EQ(in_layout, out_layout);
Destroy(&c_layout);
}
TEST(XlaLayout, FromCNested) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
xla::Layout in_layout = cpp_shape.layout();
XLA_Layout c_layout;
ToC(in_layout, &c_layout);
xla::Layout out_layout = FromC(&c_layout);
EXPECT_EQ(in_layout, out_layout);
Destroy(&c_layout);
}
TEST(XlaShape, ToCScalar) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
XLA_Shape c_shape;
ToC(cpp_shape, &c_shape);
EXPECT_EQ(cpp_shape.element_type(), c_shape.element_type);
absl::Span<const int64_t> cpp_dimensions = cpp_shape.dimensions();
absl::Span<const int64_t> c_dimensions = MakeSpan(c_shape.dimensions);
EXPECT_EQ(cpp_dimensions, c_dimensions);
absl::Span<const bool> cpp_dynamic_dimensions =
cpp_shape.dynamic_dimensions();
absl::Span<const bool> c_dynamic_dimensions =
MakeSpan(c_shape.dynamic_dimensions);
EXPECT_EQ(cpp_dynamic_dimensions, c_dynamic_dimensions);
int cpp_ntuple_shapes = cpp_shape.tuple_shapes_size();
int c_ntuple_shapes = c_shape.ntuple_shapes;
EXPECT_EQ(cpp_ntuple_shapes, c_ntuple_shapes);
EXPECT_EQ(cpp_ntuple_shapes, 0);
bool cpp_has_layout = cpp_shape.has_layout();
bool c_has_layout = c_shape.has_layout;
EXPECT_EQ(cpp_has_layout, c_has_layout);
Destroy(&c_shape);
}
TEST(XlaShape, ToCNested) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
XLA_Shape c_shape;
ToC(cpp_shape, &c_shape);
EXPECT_EQ(cpp_shape.element_type(), c_shape.element_type);
absl::Span<const int64_t> cpp_dimensions = cpp_shape.dimensions();
absl::Span<const int64_t> c_dimensions = MakeSpan(c_shape.dimensions);
EXPECT_EQ(cpp_dimensions, c_dimensions);
absl::Span<const bool> cpp_dynamic_dimensions =
cpp_shape.dynamic_dimensions();
absl::Span<const bool> c_dynamic_dimensions =
MakeSpan(c_shape.dynamic_dimensions);
EXPECT_EQ(cpp_dynamic_dimensions, c_dynamic_dimensions);
int cpp_ntuple_shapes = cpp_shape.tuple_shapes_size();
int c_ntuple_shapes = c_shape.ntuple_shapes;
EXPECT_EQ(cpp_ntuple_shapes, c_ntuple_shapes);
const std::vector<xla::Shape>& cpp_tuple_shapes = cpp_shape.tuple_shapes();
absl::Span<const XLA_Shape> c_tuple_shapes(c_shape.tuple_shapes,
c_ntuple_shapes);
for (int i = 0; i < c_ntuple_shapes; ++i) {
xla::Shape converted_c_shape = FromC(&c_tuple_shapes[i]);
EXPECT_EQ(cpp_tuple_shapes[i], converted_c_shape);
}
bool cpp_has_layout = cpp_shape.has_layout();
bool c_has_layout = c_shape.has_layout;
EXPECT_EQ(cpp_has_layout, c_has_layout);
if (c_has_layout) {
xla::Layout converted_c_layout = FromC(&c_shape.layout);
EXPECT_EQ(cpp_shape.layout(), converted_c_layout);
}
Destroy(&c_shape);
}
TEST(XlaShape, FromCScalar) {
xla::Shape in_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
XLA_Shape c_shape;
ToC(in_shape, &c_shape);
xla::Shape out_shape = FromC(&c_shape);
EXPECT_EQ(in_shape, out_shape);
Destroy(&c_shape);
}
TEST(XlaShape, FromCNested) {
xla::Shape in_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
XLA_Shape c_shape;
ToC(in_shape, &c_shape);
xla::Shape out_shape = FromC(&c_shape);
EXPECT_EQ(in_shape, out_shape);
Destroy(&c_shape);
}
TEST(XlaHloModuleConfig, ToAndFromC) {
absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module =
xla::ParseAndReturnUnverifiedModule(kHloString);
ASSERT_TRUE(hlo_module.ok());
xla::HloModule& cpp_module = *hlo_module.value();
xla::HloModuleConfig in_config = cpp_module.config();
XLA_HloModuleConfig c_config = ToC(in_config);
xla::HloModuleConfig out_config = FromC(c_config);
xla::HloModuleConfigProto in_config_proto = in_config.ToProto();
xla::HloModuleConfigProto out_config_proto = out_config.ToProto();
tsl::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
tsl::protobuf::util::MessageDifferencer::EQUIVALENT);
EXPECT_TRUE(diff.Equals(in_config_proto, out_config_proto));
Destroy(&c_config);
}
TEST(XlaHloModule, ToAndFromC) {
absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module =
xla::ParseAndReturnUnverifiedModule(kHloString);
ASSERT_TRUE(hlo_module.ok());
xla::HloModule& in_module = *hlo_module.value();
XLA_HloModule c_module = ToC(in_module);
absl::StatusOr<std::unique_ptr<xla::HloModule>> out_module_ptr =
FromC(c_module);
ASSERT_TRUE(out_module_ptr.ok());
xla::HloModule& out_module = *out_module_ptr.value();
xla::HloModuleProtoWithConfig in_module_proto = in_module.ToProtoWithConfig();
xla::HloModuleProtoWithConfig out_module_proto =
out_module.ToProtoWithConfig();
tsl::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
tsl::protobuf::util::MessageDifferencer::EQUIVALENT);
const auto* ignore_unique_id =
xla::HloModuleProto::GetDescriptor()->FindFieldByName("id");
diff.IgnoreField(ignore_unique_id);
EXPECT_TRUE(diff.Compare(in_module_proto, out_module_proto));
Destroy(&c_module);
}
}
} |
1,831 | cpp | tensorflow/tensorflow | cuda_driver | third_party/xla/xla/stream_executor/cuda/cuda_driver.cc | third_party/xla/xla/stream_executor/cuda/cuda_driver_test.cc | #ifndef XLA_STREAM_EXECUTOR_CUDA_CUDA_DRIVER_H_
#define XLA_STREAM_EXECUTOR_CUDA_CUDA_DRIVER_H_
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/node_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
namespace stream_executor {
namespace gpu {
static std::string ToString(CUresult result) {
const char* error_name;
if (cuGetErrorName(result, &error_name)) {
return absl::StrCat("UNKNOWN ERROR (", static_cast<int>(result), ")");
}
const char* error_string;
if (cuGetErrorString(result, &error_string)) {
return error_name;
}
return absl::StrCat(error_name, ": ", error_string);
}
absl::StatusOr<CUresult> QueryEvent(GpuContext* context, CUevent event);
class GpuContext {
public:
GpuContext(CUcontext context, int64_t id) : context_(context), id_(id) {}
CUcontext context() const { return context_; }
int64_t id() const { return id_; }
GpuContext(GpuContext&&) = delete;
GpuContext(const GpuContext&) = delete;
GpuContext& operator=(GpuContext&&) = delete;
GpuContext& operator=(const GpuContext&) = delete;
private:
CUcontext const context_;
const int64_t id_;
};
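// Process-wide registry of CUDA contexts created through this API.  It is
// used to verify that the current context was set up by StreamExecutor and
// to look up a context for a given device ordinal or device pointer.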
class CreatedContexts {
public:
static bool Has(CUcontext context) {
absl::ReaderMutexLock lock(&mu_);
return Live()->find(context) != Live()->end();
}
static GpuContext* Add(CUcontext context, int device_ordinal) {
CHECK(context != nullptr);
absl::MutexLock lock(&mu_);
auto insert_result = Live()->insert(std::make_pair(context, nullptr));
auto it = insert_result.first;
if (insert_result.second) {
it->second = std::make_unique<GpuContext>(context, next_id_++);
(*LiveOrdinal())[device_ordinal].push_back(context);
}
return it->second.get();
}
static void Remove(CUcontext context) {
CHECK(context != nullptr);
absl::MutexLock lock(&mu_);
auto it = Live()->find(context);
CHECK(it != Live()->end()) << context;
Live()->erase(it);
    for (auto& p : (*LiveOrdinal())) {
      auto it2 = std::find(p.second.begin(), p.second.end(), context);
      if (it2 != p.second.end()) {
        p.second.erase(it2);
if (p.second.empty()) {
LiveOrdinal()->erase(p.first);
}
break;
}
}
}
static int GetDeviceOrdinal(void* ptr) {
int device_ordinal;
CUresult result = cuPointerGetAttribute(static_cast<void*>(&device_ordinal),
CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL,
reinterpret_cast<CUdeviceptr>(ptr));
if (result != CUDA_SUCCESS) {
LOG(FATAL) << "Not able to get the device_ordinal for ptr: " << ptr
<< ". Error: " << ToString(result);
}
return device_ordinal;
}
static CUcontext GetAnyContext(void* ptr) {
absl::ReaderMutexLock lock(&mu_);
int device_ordinal = GetDeviceOrdinal(ptr);
CHECK_EQ(LiveOrdinal()->count(device_ordinal), 1);
CHECK(!LiveOrdinal()->at(device_ordinal).empty())
<< "Need at least one context.";
return LiveOrdinal()->at(device_ordinal)[0];
}
private:
static absl::node_hash_map<CUcontext, std::unique_ptr<GpuContext>>* Live() {
static auto singleton =
new absl::node_hash_map<CUcontext, std::unique_ptr<GpuContext>>;
return singleton;
}
static absl::node_hash_map<int, std::vector<CUcontext>>* LiveOrdinal() {
static auto singleton =
new absl::node_hash_map<int, std::vector<CUcontext>>;
return singleton;
}
static absl::Mutex mu_;
static int64_t next_id_;
};
}
namespace cuda {
using MemorySpace = gpu::MemorySpace;
using CUDADriver = gpu::GpuDriver;
using ScopedActivateContext = gpu::ScopedActivateContext;
using CudaContext = gpu::GpuContext;
CUcontext CurrentContextOrDie();
}
}
#endif
#include "xla/stream_executor/cuda/cuda_driver.h"
#include <stdint.h>
#include <stdlib.h>
#include <cstdint>
#include <cstring>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/casts.h"
#include "absl/base/const_init.h"
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/debugging/leak_check.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#include "xla/stream_executor/gpu/gpu_diagnostics.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/platform.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/threadpool.h"
#define RETURN_IF_CUDA_RES_ERROR(expr, ...) \
do { \
CUresult _res = (expr); \
if (ABSL_PREDICT_FALSE(_res != CUDA_SUCCESS)) { \
if (_res == CUDA_ERROR_OUT_OF_MEMORY) \
return absl::ResourceExhaustedError(absl::StrCat( \
__VA_ARGS__, ":", ::stream_executor::gpu::ToString(_res))); \
else \
return absl::InternalError(absl::StrCat( \
__VA_ARGS__, ": ", ::stream_executor::gpu::ToString(_res))); \
} \
} while (0)
#define FAIL_IF_CUDA_RES_ERROR(expr, ...) \
do { \
CUresult _res = (expr); \
if (ABSL_PREDICT_FALSE(_res != CUDA_SUCCESS)) { \
LOG(FATAL) << absl::StrCat(__VA_ARGS__) << ": " \
<< ::stream_executor::gpu::ToString(_res); \
} \
} while (0)
constexpr bool kVerifyGpuContext = false;
namespace stream_executor {
namespace gpu {
absl::Mutex CreatedContexts::mu_{absl::kConstInit};
int64_t CreatedContexts::next_id_ = 1;
namespace {
CUcontext CurrentContext() {
CUcontext current = cuda::CurrentContextOrDie();
if (current != nullptr && !CreatedContexts::Has(current)) {
LOG(FATAL) << "current context was not created by the StreamExecutor "
"cuda_driver API: "
<< current
<< "; a CUDA runtime call "
"was likely performed without using a StreamExecutor context";
}
return current;
}
tsl::thread::ThreadPool* GetDriverExecutor() {
static tsl::thread::ThreadPool* thread_pool = new tsl::thread::ThreadPool(
tsl::Env::Default(), tsl::ThreadOptions(), "cuda_driver", 1);
return thread_pool;
}
}
std::string MemorySpaceString(MemorySpace memory_space) {
switch (memory_space) {
case MemorySpace::kHost:
return "host";
case MemorySpace::kDevice:
return "device";
default:
LOG(FATAL) << "impossible memory space";
}
}
namespace {
void SynchronizeOrDie() {
FAIL_IF_CUDA_RES_ERROR(cuCtxSynchronize(),
"Synchronize fail: ", tsl::CurrentStackTrace());
}
thread_local struct ThreadLocalData {
int64_t id;
GpuContext* context;
int depth;
} tls_data = {};
}
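// Makes `cuda_context` current on this thread for the lifetime of the scope.
// Nesting depth is tracked in thread-local storage so that re-activating the
// already-current context skips the cuCtxSetCurrent call; the destructor
// restores whichever context was displaced.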
ScopedActivateContext::ScopedActivateContext(GpuContext* cuda_context) {
auto* tls = &tls_data;
if (tls->depth == 0) {
VLOG(3) << "ScopedActivateContext switching to " << cuda_context->id();
FAIL_IF_CUDA_RES_ERROR(cuCtxSetCurrent(cuda_context->context()),
"Failed setting context");
tls->depth = 1;
tls->id = cuda_context->id();
tls->context = cuda_context;
to_restore_ = nullptr;
return;
}
tls->depth++;
if (tls->id == cuda_context->id()) {
if (kVerifyGpuContext) {
CHECK_EQ(CurrentContext(), cuda_context->context());
}
DCHECK_EQ(CurrentContext(), cuda_context->context());
return;
}
VLOG(3) << "ScopedActivateContext switching context from " << tls->id
<< " to " << cuda_context->id();
to_restore_ = tls->context;
FAIL_IF_CUDA_RES_ERROR(cuCtxSetCurrent(cuda_context->context()),
"Failed setting context");
tls->id = cuda_context->id();
tls->context = cuda_context;
}
ScopedActivateContext::~ScopedActivateContext() {
auto* tls = &tls_data;
if (kVerifyGpuContext) {
CHECK_EQ(CurrentContext(),
tls->context == nullptr ? nullptr : tls->context->context());
}
tls->depth--;
DCHECK_GE(tls->depth, 0);
if (to_restore_ == nullptr) {
return;
}
FAIL_IF_CUDA_RES_ERROR(cuCtxSetCurrent(to_restore_->context()),
"Failed setting context");
tls->id = to_restore_->id();
tls->context = to_restore_;
}
namespace {
std::string CUDAPointerToDeviceString(CUdeviceptr pointer) {
auto value = GpuDriver::GetPointerDevice(pointer);
if (value.ok()) {
return absl::StrCat(value.value());
}
LOG(ERROR) << "could not query device: " << value.status();
return "?";
}
std::string CUDAPointerToMemorySpaceString(CUdeviceptr pointer) {
auto value = GpuDriver::GetPointerMemorySpace(pointer);
if (value.ok()) {
return MemorySpaceString(value.value());
}
LOG(ERROR) << "could not query device: " << value.status();
return "?";
}
std::string CUDAPointersToCanAccessString(CUdeviceptr from, CUdeviceptr to) {
auto from_context = GpuDriver::GetPointerContext(from);
if (!from_context.ok()) {
LOG(ERROR) << "could not retrieve source pointer's context: "
<< from_context.status();
return "source ptr error";
}
auto to_context = GpuDriver::GetPointerContext(to);
if (!to_context.ok()) {
LOG(ERROR) << "could not retrieve destination pointer's context: "
<< to_context.status();
return "destination ptr error";
}
return GpuDriver::CanEnablePeerAccess(from_context.value(),
to_context.value())
? "true"
: "false";
}
static absl::Status InternalInit() {
  CUresult res = cuInit(0);
if (res == CUDA_SUCCESS) {
return absl::OkStatus();
} else if (res == CUDA_ERROR_SHARED_OBJECT_INIT_FAILED) {
VLOG(1) << "failed call to cuInit: " << ToString(res);
} else {
LOG(ERROR) << "failed call to cuInit: " << ToString(res);
}
Diagnostician::LogDiagnosticInformation();
return absl::AbortedError(
absl::StrCat("failed call to cuInit: ", ToString(res)));
}
const char kScheduleSpinString[] = "spin";
const char kScheduleYieldString[] = "yield";
const char kScheduleBlockingSyncString[] = "blocking_sync";
int GetFlagsFromEnv() {
const char* gpu_schedule_string =
std::getenv("TF_CUDA_PLATFORM_GPU_DEVICE_SCHEDULE");
if (gpu_schedule_string == nullptr) {
return 0;
}
unsigned device_flags = 0;
if (strcmp(kScheduleSpinString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_SPIN;
} else if (strcmp(kScheduleYieldString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_YIELD;
} else if (strcmp(kScheduleBlockingSyncString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_BLOCKING_SYNC;
} else {
LOG(QFATAL) << "Unknown option for environment variable "
"TF_CUDA_PLATFORM_GPU_DEVICE_SCHEDULE "
<< gpu_schedule_string << " should be one of {"
<< kScheduleBlockingSyncString << ", " << kScheduleSpinString
<< ", " << kScheduleYieldString << "}";
}
return device_flags;
}
}
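// Polls the event without blocking.  CUDA_ERROR_NOT_READY is an expected
// result and is returned to the caller rather than converted to an error.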
absl::StatusOr<CUresult> QueryEvent(GpuContext* context, CUevent event) {
ScopedActivateContext activated{context};
CUresult res = cuEventQuery(event);
if (res != CUDA_SUCCESS && res != CUDA_ERROR_NOT_READY) {
return absl::InternalError(
absl::StrFormat("failed to query event: %s", ToString(res)));
}
return res;
}
absl::Status GpuDriver::Init() {
static absl::Status* init_retval = [] {
return new absl::Status(InternalInit());
}();
return *init_retval;
}
absl::Status GpuDriver::GetDevice(int device_ordinal,
CUdevice* device) {
RETURN_IF_CUDA_RES_ERROR(cuDeviceGet(device, device_ordinal),
"Failed call to cuDeviceGet");
return absl::OkStatus();
}
absl::Status GpuDriver::GetDeviceName(CUdevice device,
std::string* device_name) {
static const size_t kCharLimit = 64;
absl::InlinedVector<char, 4> chars(kCharLimit);
RETURN_IF_CUDA_RES_ERROR(
cuDeviceGetName(chars.begin(), kCharLimit - 1, device),
"Failed to get device name");
chars[kCharLimit - 1] = '\0';
*device_name = chars.begin();
return absl::OkStatus();
}
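// Retains the device's primary context (creating it if necessary) rather
// than creating a standalone context, and restores whatever context was
// current on this thread before returning.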
absl::Status GpuDriver::CreateContext(int device_ordinal,
CUdevice device,
GpuContext** context) {
*context = nullptr;
int flags = GetFlagsFromEnv();
CUresult res;
CUcontext former_context;
CUcontext new_context;
unsigned int former_primary_context_flags;
int former_primary_context_is_active;
CHECK_EQ(CUDA_SUCCESS,
cuDevicePrimaryCtxGetState(device, &former_primary_context_flags,
&former_primary_context_is_active));
if (former_primary_context_flags != flags) {
if (former_primary_context_is_active) {
LOG(ERROR)
<< "The primary context is active and has a different flag set ("
<< former_primary_context_flags << ") than the desired flag set ("
<< flags << ").";
} else {
CHECK_EQ(CUDA_SUCCESS, cuDevicePrimaryCtxSetFlags(device, flags));
}
}
former_context = cuda::CurrentContextOrDie();
res = cuDevicePrimaryCtxRetain(&new_context, device);
if (former_context != nullptr) {
CUdevice former_device;
if (cuCtxGetDevice(&former_device) == CUDA_SUCCESS) {
if (former_device == device) {
if (former_context == new_context) {
VLOG(2) << "The primary context " << former_context << " for device "
<< device
<< " exists before initializing the StreamExecutor.";
} else {
LOG(WARNING) << "A non-primary context " << former_context
<< " for device " << device
<< " exists before initializing the StreamExecutor. The "
<< "primary context is now " << new_context << ". We "
<< "haven't verified StreamExecutor works with that.";
}
}
} else {
LOG(ERROR) << "Failed to get the device of the current context "
<< former_context;
}
}
CHECK_EQ(CUDA_SUCCESS, cuCtxSetCurrent(former_context));
if (res == CUDA_SUCCESS) {
*context = CreatedContexts::Add(new_context, device_ordinal);
CHECK(*context != nullptr)
<< "success in this call must entail non-null result";
VLOG(2) << "created or reused context " << new_context
<< " for this thread";
return absl::OkStatus();
}
std::string message =
"failed call to cuDevicePrimaryCtxRetain: " + ToString(res);
if (res == CUDA_ERROR_OUT_OF_MEMORY) {
uint64_t total_memory;
if (GetDeviceTotalMemory(device, &total_memory)) {
absl::StrAppend(&message, "; total memory reported: ", total_memory);
} else {
absl::StrAppend(&message, "; could not query total memory");
}
}
return absl::InternalError(message);
}
void GpuDriver::DestroyContext(GpuContext* context) {
if (context == nullptr) {
return;
}
CUresult res = cuCtxPushCurrent(context->context());
CUdevice device;
cuCtxGetDevice(&device);
cuCtxPopCurrent(nullptr);
res = cuDevicePrimaryCtxRelease(device);
if (res != CUDA_SUCCESS) {
LOG(ERROR) << "failed to release CUDA context; leaking: " << ToString(res);
}
CreatedContexts::Remove(context->context());
}
absl::Status GpuDriver::FuncGetAttribute(
CUfunction_attribute attribute, CUfunction func, int* attribute_value) {
RETURN_IF_CUDA_RES_ERROR(cuFuncGetAttribute(attribute_value, attribute, func),
"Failed to query kernel attribute: ", attribute);
return absl::OkStatus();
}
absl::Status GpuDriver::FuncSetCacheConfig(
CUfunction function, CUfunc_cache cache_config) {
RETURN_IF_CUDA_RES_ERROR(cuFuncSetCacheConfig(function, cache_config),
"Failed to set CUDA kernel cache config");
return absl::OkStatus();
}
absl::StatusOr<CUsharedconfig>
GpuDriver::ContextGetSharedMemConfig(GpuContext* context) {
CUsharedconfig shared_mem_config;
ScopedActivateContext activation(context);
RETURN_IF_CUDA_RES_ERROR(cuCtxGetSharedMemConfig(&shared_mem_config),
"Failed to get shared memory config");
return shared_mem_config;
}
absl::Status GpuDriver::ContextSetSharedMemConfig(
GpuContext* context, CUsharedconfig shared_mem_config) {
ScopedActivateContext activation(context);
RETURN_IF_CUDA_RES_ERROR(cuCtxSetSharedMemConfig(shared_mem_config),
"Failed to set shared memory config");
return absl::OkStatus();
}
absl::Status GpuDriver::CreateGraph(CUgraph* graph) {
VLOG(2) << "Create new CUDA graph";
RETURN_IF_CUDA_RES_ERROR(cuGraphCreate(graph, 0),
"Failed to create CUDA graph");
VLOG(2) << "Created CUDA graph " << *graph;
return absl::OkStatus();
}
absl::Status GpuDriver::DestroyGraph(CUgraph graph) {
VLOG(2) << "Destroy CUDA graph " << graph;
RETURN_IF_CUDA_RES_ERROR(cuGraphDestroy(graph),
"Failed to destroy CUDA graph");
return absl::OkStatus();
}
static std::string_view StreamCaptureModeToString(
GpuDriver::StreamCaptureMode mode) {
switch (mode) {
case GpuDriver::StreamCaptureMode::kGlobal:
return "global";
case GpuDriver::StreamCaptureMode::kThreadLocal:
return "threadlocal";
case GpuDriver::StreamCaptureMode::kRelaxed:
return "relaxed";
}
}
absl::Status GpuDriver::StreamBeginCapture(
CUstream stream, StreamCaptureMode mode) {
CUstreamCaptureMode cu_mode;
switch (mode) {
case StreamCaptureMode::kGlobal:
cu_mode = CU_STREAM_CAPTURE_MODE_GLOBAL;
break;
case StreamCaptureMode::kThreadLocal:
cu_mode = CU_STREAM_CAPTURE_MODE_THREAD_LOCAL;
break;
case StreamCaptureMode::kRelaxed:
cu_mode = CU_STREAM_CAPTURE_MODE_RELAXED;
break;
}
VLOG(2) << "Beginning stream " << stream << " capture in "
<< StreamCaptureModeToString(mode) << " mode";
RETURN_IF_CUDA_RES_ERROR(cuStreamBeginCapture(stream, cu_mode),
"Failed to begin stream capture");
return absl::OkStatus();
}
absl::Status GpuDriver::StreamBeginCaptureToGraph(
CUstream stream, CUgraph graph, StreamCaptureMode mode) {
CUstreamCaptureMode cu_mode;
switch (mode) {
case StreamCaptureMode::kGlobal:
cu_mode = CU_STREAM_CAPTURE_MODE_GLOBAL;
break;
case StreamCaptureMode::kThreadLocal:
cu_mode = CU_STREAM_CAPTURE_MODE_THREAD_LOCAL;
break;
case StreamCaptureMode::kRelaxed:
cu_mode = CU_STREAM_CAPTURE_MODE_RELAXED;
break;
}
#if CUDA_VERSION >= 12030
VLOG(2) << "Beginning stream " << stream << " capture in "
<< StreamCaptureModeToString(mode) << " mode to graph " << graph;
RETURN_IF_CUDA_RES_ERROR(
cuStreamBeginCaptureToGraph(stream, graph,
nullptr,
nullptr,
0, cu_mode),
"Failed to begin stream capture to graph");
return absl::OkStatus();
#else
return absl::UnimplementedError(
"StreamBeginCaptureToGraph is not implemented");
#endif
}
absl::Status GpuDriver::StreamEndCapture(CUstream stream,
CUgraph* graph) {
VLOG(2) << "End stream " << stream << " capture";
RETURN_IF_CUDA_RES_ERROR(cuStreamEndCapture(stream, graph),
"Failed to end stream capture");
return absl::OkStatus();
}
absl::Status GpuDriver::GraphInstantiate(
CUgraphExec* exec, CUgraph graph, const GraphInstantiateFlags& flags) {
VLOG(2) << "Instantiate CUDA executable graph from graph " << graph << " ("
<< "auto_free_on_launch=" << flags.auto_free_on_launch << ", "
<< "device_launch=" << flags.device_launch << ", "
<< "use_node_priority=" << flags.use_node_prirotiy << ", "
<< "upload=" << flags.upload << ")";
#if CUDA_VERSION >= 12000
uint64_t cu_flags = 0;
if (flags.auto_free_on_launch)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH;
if (flags.use_node_prirotiy)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY;
if (flags.device_launch)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH;
if (flags.upload) cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD;
RETURN_IF_CUDA_RES_ERROR(cuGraphInstantiate(exec, graph, cu_flags),
"Failed to instantiate CUDA graph");
#else
RETURN_IF_CUDA_RES_ERROR(cuGraphInstantiate(exec, graph, nullptr, nullptr, 0),
"Failed to instantiate CUDA graph");
#endif
return absl::OkStatus();
}
absl::Status GpuDriver::GraphLaunch(CUgraphExec exec,
CUstream stream) {
VLOG(2) << "Launching CUDA executable graph " << exec << " on a stream "
<< stream;
RETURN_IF_CUDA_RES_ERROR(cuGraphLaunch(exec, stream),
"Failed to launch CUDA graph");
return absl::OkStatus();
}
absl::Status GpuDriver::GraphNodeSetEnabled(CUgraphExec exec,
CUgraphNode node,
bool enabled) {
unsigned value = enabled ? 1 : 0;
VLOG(2) << "Set CUDA executable graph " << exec << " node " << node
<< " enabled flag to " << value;
RETURN_IF_CUDA_RES_ERROR(cuGraphNodeSetEnabled(exec, node, value),
"Failed to set CUDA graph node enabled flag");
return absl::OkStatus();
}
absl::Status GpuDriver::GraphExecUpdate(
CUgraphExec exec, CUgraph graph, GraphExecUpdateResultInfo* result) {
VLOG(2) << "Update CUDA graph executable " << exec << " with graph " << graph;
#if CUDA_VERSION >= 12000
CUgraphExecUpdateResultInfo cu_result;
memset(&cu_result, 0, sizeof(cu_result));
CUresult err_code = cuGraphExecUpdate(exec, graph, &cu_result);
auto cu_result_enum = cu_result.result;
if (cu_result.errorFromNode) {
result->error_from_node = cu_result.errorFromNode;
}
if (cu_result.errorNode) {
result->error_node = cu_result.errorNode;
}
#else
CUgraphExecUpdateResult cu_result;
CUresult err_code = cuGraphExecUpdate(exec, graph, nullptr, &cu_result);
auto cu_result_enum = cu_result;
#endif
switch (cu_result_enum) {
case CU_GRAPH_EXEC_UPDATE_SUCCESS:
result->result = GraphExecUpdateResult::kSuccess;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR:
result->result = GraphExecUpdateResult::kError;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED:
result->result = GraphExecUpdateResult::kTopologyChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED:
result->result = GraphExecUpdateResult::kNodeTypeChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED:
result->result = GraphExecUpdateResult::kFunctionChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED:
result->result = GraphExecUpdateResult::kParametersChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED:
result->result = GraphExecUpdateResult::kNotSupported;
break;
#if CUDA_VERSION >= 12000
case CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE:
result->result = GraphExecUpdateResult::kUnsupportedFunctionChange;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED:
result->result = GraphExecUpdateResult::kAttributesChanged;
break;
#endif
default:
return absl::InternalError("Unknown graph update result");
}
RETURN_IF_CUDA_RES_ERROR(err_code, "Failed to update CUDA graph");
return absl::OkStatus();
}
absl::StatusOr<GpuDriver::GraphNodeType>
GpuDriver::GraphNodeGetType(CUgraphNode node) {
CUgraphNodeType cu_node_type;
memset(&cu_node_type, 0, sizeof(cu_node_type));
RETURN_IF_CUDA_RES_ERROR(cuGraphNodeGetType(node, &cu_node_type),
"Failed to get CUDA graph node type");
switch (cu_node_type) {
case CU_GRAPH_NODE_TYPE_KERNEL:
return GraphNodeType::kKernel;
case CU_GRAPH_NODE_TYPE_MEMCPY:
return GraphNodeType::kMemcpy;
case CU_GRAPH_NODE_TYPE_MEMSET:
return GraphNodeType::kMemset;
case CU_GRAPH_NODE_TYPE_HOST:
return GraphNodeType::kHost;
case CU_GRAPH_NODE_TYPE_GRAPH:
return GraphNodeType::kGraph;
case CU_GRAPH_NODE_TYPE_EMPTY:
return GraphNodeType::kEmpty;
#if CUDA_VERSION >= 12000
case CU_GRAPH_NODE_TYPE_WAIT_EVENT:
return GraphNodeType::kWaitEvent;
case CU_GRAPH_NODE_TYPE_EVENT_RECORD:
return GraphNodeType::kEventRecord;
case CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL:
return GraphNodeType::kExtSemasSignal;
case CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT:
return GraphNodeType::kExtSemasWait;
case CU_GRAPH_NODE_TYPE_MEM_ALLOC:
return GraphNodeType::kMemAlloc;
case CU_GRAPH_NODE_TYPE_MEM_FREE:
return GraphNodeType::kMemFree;
case CU_GRAPH_NODE_TYPE_BATCH_MEM_OP:
return GraphNodeType::kBatchMemOp;
#endif
default:
return absl::InternalError("Unknown graph node type");
}
return absl::InternalError("Invalid CUDA graph node type");
}
absl::StatusOr<std::vector<GpuGraphNodeHandle>>
GpuDriver::GraphNodeGetDependencies(GpuGraphNodeHandle node) {
VLOG(2) << "Get CUDA graph node " << node << " dependencies";
std::vector<CUgraphNode> dependencies;
size_t num_dependencies = 0;
RETURN_IF_CUDA_RES_ERROR(
cuGraphNodeGetDependencies(node, nullptr, &num_dependencies),
"Failed to get CUDA graph node depedenc | #include "absl/log/log.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/cuda/cuda_driver.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace gpu {
void CheckCuda(CUresult result, const char* file, int line) {
if (result == CUDA_SUCCESS) {
return;
}
const char* name;
cuGetErrorName(result, &name);
const char* message;
cuGetErrorString(result, &message);
LOG(FATAL) << file << "(" << line << "): " << name << ", " << message;
}
void CheckCuda(cudaError_t result, const char* file, int line) {
if (result == cudaSuccess) {
return;
}
const char* name = cudaGetErrorName(result);
const char* message = cudaGetErrorString(result);
LOG(FATAL) << file << "(" << line << "): " << name << ", " << message;
}
#define CHECK_CUDA(result) CheckCuda(result, __FILE__, __LINE__)
TEST(CudaDriverTest, ScopedActivateContextTest) {
CHECK_CUDA(cuInit(0));
CUdevice device;
CHECK_CUDA(cuDeviceGet(&device, 0));
CUcontext context0, context1;
CHECK_CUDA(cuCtxCreate(&context0, 0, device));
CHECK_CUDA(cuCtxCreate(&context1, 0, device));
GpuContext se_context1(context1, 101);
{
ScopedActivateContext scope(&se_context1);
CUcontext c;
CHECK_CUDA(cuCtxGetCurrent(&c));
EXPECT_EQ(c, context1);
}
CHECK_CUDA(cuCtxSetCurrent(context0));
{
ScopedActivateContext scope(&se_context1);
CUcontext c;
CHECK_CUDA(cuCtxGetCurrent(&c));
EXPECT_EQ(c, context1);
}
}
}
} |
1,832 | cpp | tensorflow/tensorflow | redzone_allocator | third_party/xla/xla/stream_executor/gpu/redzone_allocator.cc | third_party/xla/xla/stream_executor/gpu/redzone_allocator_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_REDZONE_ALLOCATOR_H_
#define XLA_STREAM_EXECUTOR_GPU_REDZONE_ALLOCATOR_H_
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
class RedzoneAllocator : public ScratchAllocator {
public:
static constexpr int64_t kDefaultRedzoneSize =
1LL << 23;
static constexpr uint8_t kDefaultRedzonePattern = -1;
RedzoneAllocator(Stream* stream, DeviceMemoryAllocator* memory_allocator,
const GpuAsmOpts& gpu_compilation_opts_,
int64_t memory_limit = (1LL << 32),
int64_t redzone_size = kDefaultRedzoneSize,
uint8_t redzone_pattern = kDefaultRedzonePattern);
int64_t GetMemoryLimitInBytes() override { return memory_limit_; }
int64_t TotalAllocatedBytesExcludingRedzones() const {
return allocated_bytes_excluding_redzones_;
}
absl::StatusOr<DeviceMemory<uint8>> AllocateBytes(int64_t byte_size) override;
struct RedzoneCheckStatus {
RedzoneCheckStatus() = default;
RedzoneCheckStatus(absl::string_view buffer_name, void* user_buffer_address,
int64_t offset, uint64_t expected_value,
uint64_t actual_value)
: buffer_name(buffer_name),
user_buffer_address(user_buffer_address),
offset(offset),
expected_value(expected_value),
actual_value(actual_value) {}
static RedzoneCheckStatus OK() { return {}; }
bool ok() { return user_buffer_address == nullptr; }
std::string RedzoneFailureMsg() const;
std::string buffer_name = {};
void* user_buffer_address = nullptr;
int64_t offset = 0;
uint64_t expected_value = 0;
uint64_t actual_value = 0;
};
absl::StatusOr<RedzoneCheckStatus> CheckRedzones() const;
Stream* stream() const { return stream_; }
private:
const int device_ordinal_;
Stream* stream_;
const int64_t memory_limit_;
const int64_t redzone_size_;
const uint8_t redzone_pattern_;
DeviceMemoryAllocator* memory_allocator_;
GpuAsmOpts gpu_compilation_opts_;
std::vector<std::pair<OwningDeviceMemory, int64_t>> allocated_buffers_;
int64_t allocated_bytes_excluding_redzones_ = 0;
};
}
#endif
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/redzone_allocator_kernel.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "tsl/lib/math/math_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
template <typename T>
static T RoundUpToNearest(T value, T divisor) {
return tsl::MathUtil::CeilOfRatio(value, divisor) * divisor;
}
constexpr int64_t kRhsRedzoneAlign = 4;
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
RedzoneAllocator::RedzoneAllocator(Stream* stream,
DeviceMemoryAllocator* memory_allocator,
const GpuAsmOpts& gpu_compilation_opts,
int64_t memory_limit, int64_t redzone_size,
uint8_t redzone_pattern)
: device_ordinal_(stream->parent()->device_ordinal()),
stream_(stream),
memory_limit_(memory_limit),
redzone_size_(RoundUpToNearest(
redzone_size,
static_cast<int64_t>(tsl::Allocator::kAllocatorAlignment))),
redzone_pattern_(redzone_pattern),
memory_allocator_(memory_allocator),
gpu_compilation_opts_(gpu_compilation_opts) {}
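// Allocates one backing buffer laid out as
//   [LHS redzone | user data | slop | RHS redzone]
// and returns only the user-data slice.  The redzones (and the slop that pads
// the user data up to a 4-byte boundary) are filled with the redzone pattern
// so that CheckRedzones() can later detect out-of-bounds writes.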
absl::StatusOr<DeviceMemory<uint8_t>> RedzoneAllocator::AllocateBytes(
int64_t byte_size) {
  CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
int64_t rhs_slop = RoundUpToNearest(byte_size, kRhsRedzoneAlign) - byte_size;
TF_ASSIGN_OR_RETURN(
OwningDeviceMemory allocated_buffer,
memory_allocator_->Allocate(device_ordinal_,
byte_size + 2 * redzone_size_ + rhs_slop,
false));
allocated_bytes_excluding_redzones_ += byte_size;
static_assert(sizeof(uint8_t) == 1, "Unexpected size");
DeviceMemory<uint8_t> allocated_buffer_memory(*allocated_buffer);
DeviceMemory<uint8_t> lhs_redzone =
allocated_buffer_memory.GetSlice(0, redzone_size_);
DeviceMemory<uint8_t> data_chunk =
allocated_buffer_memory.GetSlice(redzone_size_, byte_size);
DeviceMemory<uint8_t> rhs_redzone_slop =
allocated_buffer_memory.GetSlice(redzone_size_ + byte_size, rhs_slop);
DeviceMemory<uint8_t> rhs_redzone_nonslop = allocated_buffer_memory.GetSlice(
redzone_size_ + byte_size + rhs_slop, redzone_size_);
uint8_t pattern_arr[] = {redzone_pattern_, redzone_pattern_, redzone_pattern_,
redzone_pattern_};
uint32_t pattern32;
std::memcpy(&pattern32, pattern_arr, sizeof(pattern32));
TF_RETURN_IF_ERROR(stream_->Memset32(&lhs_redzone, pattern32, redzone_size_));
if (rhs_slop != 0) {
TF_RETURN_IF_ERROR(
stream_->Memcpy(&rhs_redzone_slop, &pattern32, rhs_slop));
}
TF_RETURN_IF_ERROR(
stream_->Memset32(&rhs_redzone_nonslop, pattern32, redzone_size_));
allocated_buffers_.emplace_back(std::move(allocated_buffer), byte_size);
return data_chunk;
}
static absl::StatusOr<RedzoneCheckStatus> CheckRedzoneHost(
DeviceMemoryBase redzone, DeviceMemoryBase user_allocation,
absl::string_view name, Stream* stream, uint8_t redzone_pattern) {
uint64_t size = redzone.size();
auto redzone_data = std::make_unique<uint8_t[]>(size);
TF_RETURN_IF_ERROR(stream->Memcpy(redzone_data.get(), redzone, size));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
std::array<uint8_t, sizeof(uint64_t)> pattern_arr;
pattern_arr.fill(redzone_pattern);
uint64_t pattern64;
std::memcpy(&pattern64, pattern_arr.data(), sizeof(uint64_t));
int64_t i;
for (i = 0; i + 7 < size; i += sizeof(uint64_t)) {
uint64_t rz_value = *reinterpret_cast<uint64_t*>(&redzone_data[i]);
if (rz_value != pattern64) {
return RedzoneCheckStatus(name, user_allocation.opaque(), i, pattern64,
rz_value);
}
}
for (; i < size; ++i) {
uint8_t rz_value = redzone_data[i];
if (rz_value != redzone_pattern) {
return RedzoneCheckStatus(name, user_allocation.opaque(), i,
redzone_pattern, rz_value);
}
}
return RedzoneCheckStatus::OK();
}
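// Launches the device-side comparison kernel over `redzone`; the kernel
// writes a nonzero value into `out_param` when any byte deviates from the
// redzone pattern.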
static absl::Status RunRedzoneChecker(
Stream* stream, const DeviceMemory<uint8_t>& redzone,
uint8_t redzone_pattern, const DeviceMemory<uint64_t>& out_param,
const ComparisonKernel& comparison_kernel) {
StreamExecutor* executor = stream->parent();
if (redzone.size() == 0) {
return absl::OkStatus();
}
int64_t num_elements = redzone.size();
int64_t threads_per_block = std::min(
executor->GetDeviceDescription().threads_per_block_limit(), num_elements);
int64_t block_count =
tsl::MathUtil::CeilOfRatio(num_elements, threads_per_block);
TF_RETURN_IF_ERROR(stream->ThenLaunch(
ThreadDim(threads_per_block), BlockDim(block_count), comparison_kernel,
redzone, redzone_pattern, redzone.size(), out_param));
return absl::OkStatus();
}
static absl::Status ReinitializeRedzone(Stream* stream,
DeviceMemoryBase redzone,
uint8_t redzone_pattern) {
absl::FixedArray<uint8_t> redzone_array(redzone.size());
redzone_array.fill(redzone_pattern);
TF_RETURN_IF_ERROR(
stream->Memcpy(&redzone, redzone_array.data(), redzone.size()));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
return absl::OkStatus();
}
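// Checks both redzones of a single allocation on the device.  If the kernel
// reports a mismatch, the check is redone on the host to pinpoint the
// offending offset, and the redzones are repainted so later checks start from
// a clean state.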
static absl::StatusOr<RedzoneCheckStatus> CheckRedzonesForBuffer(
Stream* stream, DeviceMemoryBase memory,
const DeviceMemory<uint64_t>& out_param,
const ComparisonKernel& comparison_kernel, int64_t user_allocation_size,
uint64_t redzone_size, uint8_t redzone_pattern) {
int64_t rhs_slop =
RoundUpToNearest<int64_t>(user_allocation_size, kRhsRedzoneAlign) -
user_allocation_size;
CHECK_EQ(memory.size(), user_allocation_size + rhs_slop + 2 * redzone_size);
DeviceMemory<uint8_t> buffer_uint8(memory);
DeviceMemory<uint8_t> lhs_redzone =
buffer_uint8.GetSlice(0,
redzone_size);
DeviceMemory<uint8_t> user_allocation =
buffer_uint8.GetSlice(redzone_size,
user_allocation_size);
DeviceMemory<uint8_t> rhs_redzone =
buffer_uint8.GetSlice(redzone_size + user_allocation_size,
redzone_size + rhs_slop);
TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, lhs_redzone, redzone_pattern,
out_param, comparison_kernel));
TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, rhs_redzone, redzone_pattern,
out_param, comparison_kernel));
int64_t result;
CHECK_EQ(out_param.size(), sizeof(result));
TF_RETURN_IF_ERROR(stream->Memcpy(&result, out_param, sizeof(result)));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
if (result != 0) {
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus lhs_check,
CheckRedzoneHost(lhs_redzone, user_allocation, "LHS",
stream, redzone_pattern));
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus rhs_check,
CheckRedzoneHost(rhs_redzone, user_allocation, "RHS",
stream, redzone_pattern));
CHECK(!lhs_check.ok() || !rhs_check.ok())
<< "Mismatched results with host and device comparison";
TF_RETURN_IF_ERROR(
ReinitializeRedzone(stream, lhs_redzone, redzone_pattern));
TF_RETURN_IF_ERROR(
ReinitializeRedzone(stream, rhs_redzone, redzone_pattern));
return !lhs_check.ok() ? lhs_check : rhs_check;
}
return RedzoneCheckStatus::OK();
}
absl::StatusOr<RedzoneCheckStatus> RedzoneAllocator::CheckRedzones() const {
StreamExecutor* executor = stream_->parent();
TF_ASSIGN_OR_RETURN(
const ComparisonKernel* kernel,
GetComparisonKernel(stream_->parent(), gpu_compilation_opts_));
stream_executor::DeviceMemoryHandle out_param(
executor, executor->AllocateScalar<uint64_t>());
TF_RETURN_IF_ERROR(
stream_->MemZero(out_param.memory_ptr(), sizeof(uint64_t)));
for (const auto& buf_and_size : allocated_buffers_) {
TF_ASSIGN_OR_RETURN(
RedzoneCheckStatus redzone_status,
CheckRedzonesForBuffer(stream_, *buf_and_size.first,
DeviceMemory<uint64_t>(out_param.memory()),
*kernel, buf_and_size.second, redzone_size_,
redzone_pattern_));
if (!redzone_status.ok()) {
return redzone_status;
}
}
return RedzoneCheckStatus::OK();
}
std::string RedzoneCheckStatus::RedzoneFailureMsg() const {
return absl::StrFormat(
"Redzone mismatch in %s redzone of buffer %p at offset %d; "
"expected %08x but was %08x.",
buffer_name, user_buffer_address, offset, expected_value, actual_value);
}
} | #include "xla/stream_executor/gpu/redzone_allocator.h"
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace gpu {
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
static void EXPECT_REDZONE_OK(absl::StatusOr<RedzoneCheckStatus> status) {
EXPECT_TRUE(status.ok());
EXPECT_TRUE(status.value().ok());
}
static void EXPECT_REDZONE_VIOLATION(
absl::StatusOr<RedzoneCheckStatus> status) {
EXPECT_TRUE(status.ok());
EXPECT_FALSE(status.value().ok());
}
TEST(RedzoneAllocatorTest, WriteToRedzone) {
constexpr int64_t kRedzoneSize = 1 << 23;
constexpr uint8_t kRedzonePattern = 0x7e;
constexpr int64_t kAllocSize = (1 << 25) + 1;
Platform* platform =
PlatformManager::PlatformWithName(GpuPlatformName()).value();
StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
GpuAsmOpts opts;
StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
(1LL << 32),
kRedzoneSize,
kRedzonePattern);
TF_ASSERT_OK_AND_ASSIGN(DeviceMemory<uint8_t> buf,
allocator.AllocateBytes(kAllocSize));
EXPECT_REDZONE_OK(allocator.CheckRedzones());
char* buf_addr = reinterpret_cast<char*>(buf.opaque());
DeviceMemoryBase lhs_redzone(buf_addr - kRedzoneSize, kRedzoneSize);
DeviceMemoryBase rhs_redzone(buf_addr + kAllocSize, kRedzoneSize);
auto check_redzone = [&](DeviceMemoryBase redzone, absl::string_view name) {
std::vector<uint8_t> host_buf(kRedzoneSize);
TF_ASSERT_OK(stream->Memcpy(host_buf.data(), redzone, kRedzoneSize));
TF_ASSERT_OK(stream->BlockHostUntilDone());
const int64_t kMaxMismatches = 16;
int64_t mismatches = 0;
for (int64_t i = 0; i < host_buf.size(); ++i) {
if (mismatches == kMaxMismatches) {
ADD_FAILURE() << "Hit max number of mismatches; skipping others.";
break;
}
if (host_buf[i] != kRedzonePattern) {
++mismatches;
EXPECT_EQ(host_buf[i], kRedzonePattern)
<< "at index " << i << " of " << name << " redzone";
}
}
};
check_redzone(lhs_redzone, "lhs");
check_redzone(rhs_redzone, "rhs");
auto modify_redzone = [&](DeviceMemoryBase redzone, int64_t offset,
absl::string_view name) {
SCOPED_TRACE(absl::StrCat(name, ", offset=", offset));
DeviceMemoryBase redzone_at_offset(
reinterpret_cast<char*>(redzone.opaque()) + offset, 1);
char old_redzone_value = 0;
{ EXPECT_REDZONE_OK(allocator.CheckRedzones()); }
TF_ASSERT_OK(stream->Memcpy(&old_redzone_value, redzone_at_offset, 1));
TF_ASSERT_OK(stream->MemZero(&redzone_at_offset, 1));
EXPECT_REDZONE_VIOLATION(allocator.CheckRedzones());
EXPECT_REDZONE_OK(allocator.CheckRedzones());
};
modify_redzone(lhs_redzone, 0, "lhs");
modify_redzone(lhs_redzone, kRedzoneSize - 1, "lhs");
modify_redzone(rhs_redzone, 0, "rhs");
modify_redzone(rhs_redzone, kRedzoneSize - 1, "rhs");
}
TEST(RedzoneAllocatorTest, VeryLargeRedzone) {
constexpr int64_t kRedzoneSize = 65535 * 1024 + 1;
Platform* platform =
PlatformManager::PlatformWithName(GpuPlatformName()).value();
StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
GpuAsmOpts opts;
StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
(1LL << 32),
kRedzoneSize,
-1);
(void)allocator.AllocateBytes(1);
EXPECT_REDZONE_OK(allocator.CheckRedzones());
}
}
} |
1,833 | cpp | tensorflow/tensorflow | gpu_cudamallocasync_allocator | third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator.cc | third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_GPU_CUDAMALLOCASYNC_ALLOCATOR_H_
#define XLA_STREAM_EXECUTOR_GPU_GPU_CUDAMALLOCASYNC_ALLOCATOR_H_
#include <atomic>
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/mutex.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#define TF_CUDA_MALLOC_ASYNC_SUPPORTED CUDA_VERSION >= 11020
#endif
namespace stream_executor {
class GpuCudaMallocAsyncAllocator : public tsl::Allocator {
public:
explicit GpuCudaMallocAsyncAllocator(tsl::PlatformDeviceId platform_device_id,
size_t release_threshold,
bool reserve_memory = false,
bool compute_stats = true);
explicit GpuCudaMallocAsyncAllocator(tsl::PlatformDeviceId platform_device_id,
bool create_new_pool,
size_t new_pool_size,
bool reserve_memory = false,
size_t reserve_memory_size = 0,
bool sync_mode = false,
bool compute_stats = true);
~GpuCudaMallocAsyncAllocator() override;
std::string Name() override { return name_; }
void* AllocateRaw(size_t alignment,
size_t num_bytes) override ABSL_NO_THREAD_SAFETY_ANALYSIS;
void DeallocateRaw(void* ptr) override ABSL_NO_THREAD_SAFETY_ANALYSIS;
bool TracksAllocationSizes() const override;
size_t RequestedSize(const void* ptr) const override;
size_t AllocatedSize(const void* ptr) const override;
std::optional<tsl::AllocatorStats> GetStats() override;
bool ClearStats() override;
void SetStreamAndPreallocateMemory(void* stream) override;
static int GetInstantiatedCountTestOnly() { return number_instantiated_; }
tsl::AllocatorMemoryType GetMemoryType() const override {
return tsl::AllocatorMemoryType::kDevice;
}
private:
void PrintAllocatorStatisticsNoLock() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);
#if TF_CUDA_MALLOC_ASYNC_SUPPORTED
StreamExecutor* stream_exec_;
CUstream cuda_stream_;
CUmemoryPool pool_;
#endif
static std::atomic<int> number_instantiated_;
std::string name_;
bool reserve_memory_;
bool create_new_pool_;
bool sync_mode_;
GpuCudaMallocAsyncAllocator(const GpuCudaMallocAsyncAllocator&) = delete;
void operator=(const GpuCudaMallocAsyncAllocator&) = delete;
mutable tsl::mutex lock_;
std::unique_ptr<tsl::AllocatorStats> stats_ ABSL_PT_GUARDED_BY(lock_);
absl::flat_hash_map<const void*, size_t> size_map_ ABSL_GUARDED_BY(lock_);
};
}
#endif
#include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#ifdef GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/cuda/cuda_activation.h"
#endif
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/util/env_var.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
namespace stream_executor {
#if GOOGLE_CUDA
static std::string GetCudaErrorMessage(CUresult result) {
const char* error;
cuGetErrorString(result, &error);
const char* name;
cuGetErrorName(result, &name);
return absl::StrCat("CUDA error: ", error ? error : "<unknown>", " (",
name ? name : "Unknown", ")");
}
#endif
void GpuCudaMallocAsyncAllocator::PrintAllocatorStatisticsNoLock() {
std::map<size_t, int> size_map_histogram;
std::vector<std::string> ptr_size_string;
for (auto p : size_map_) {
if (VLOG_IS_ON(8)) {
ptr_size_string.push_back(
absl::StrCat("(", absl::Hex(p.first), ",", p.second) + ")");
}
size_map_histogram[p.second]++;
}
LOG(ERROR) << "Histogram of current allocation: (allocation_size_in_bytes, "
<< "nb_allocation_of_that_sizes), ...;";
for (auto p : size_map_histogram) {
LOG(ERROR) << p.first << ", " << p.second;
}
VLOG(8) << "\nThe sorted list of (ptr,size):";
VLOG(8) << absl::StrJoin(ptr_size_string, ",");
#if CUDA_VERSION >= 11030
cuuint64_t mem_reserved_current;
if (auto result = cuMemPoolGetAttribute(
pool_, CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT, &mem_reserved_current)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< GetCudaErrorMessage(result);
}
cuuint64_t mem_used_current;
if (auto result = cuMemPoolGetAttribute(
pool_, CU_MEMPOOL_ATTR_USED_MEM_CURRENT, &mem_used_current)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< GetCudaErrorMessage(result);
}
cuuint64_t mem_reserved_high;
if (auto result = cuMemPoolGetAttribute(
pool_, CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH, &mem_reserved_high)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< GetCudaErrorMessage(result);
}
cuuint64_t mem_used_high;
if (auto result = cuMemPoolGetAttribute(pool_, CU_MEMPOOL_ATTR_USED_MEM_HIGH,
&mem_used_high)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< GetCudaErrorMessage(result);
}
LOG(ERROR) << "CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: "
<< mem_reserved_current;
LOG(ERROR) << "CU_MEMPOOL_ATTR_USED_MEM_CURRENT: " << mem_used_current;
LOG(ERROR) << "CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: " << mem_reserved_high;
LOG(ERROR) << "CU_MEMPOOL_ATTR_USED_MEM_HIGH: " << mem_used_high;
#endif
}
std::atomic<int> GpuCudaMallocAsyncAllocator::number_instantiated_(0);
GpuCudaMallocAsyncAllocator::GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId platform_device_id, bool create_new_pool,
size_t new_pool_size, bool reserve_memory, size_t reserve_memory_size,
bool sync_mode, bool compute_stats)
: name_(absl::StrCat("gpu_async_", platform_device_id.value())),
reserve_memory_(reserve_memory),
create_new_pool_(create_new_pool),
sync_mode_(sync_mode) {
++number_instantiated_;
(void)reserve_memory_;
#if TF_CUDA_MALLOC_ASYNC_SUPPORTED
stream_exec_ = GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
pool_ = nullptr;
cuda_stream_ = nullptr;
int driverVersion;
cuDriverGetVersion(&driverVersion);
VLOG(2) << "DRIVER VERSION: " << driverVersion;
if (driverVersion < 11020) {
LOG(FATAL)
<< "Disable cuda_malloc_async or update your CUDA driver to a version"
<< " compatible with CUDA 11.2 or higher."
<< " We detected a version compatible with: " << driverVersion;
}
if (platform_device_id.value() > 0 && driverVersion < 11030) {
CUcontext pctx;
if (auto result = cuDevicePrimaryCtxRetain(&pctx, 0))
LOG(FATAL)
<< "Failed to retain context: " << GetCudaErrorMessage(result);
}
cuda::ScopedActivateExecutorContext scoped_activation{stream_exec_};
if (auto status2 = cuDriverGetVersion(&driverVersion)) {
LOG(FATAL)
<< "Error while fetching driver version: "
<< GetCudaErrorMessage(status2);
}
int cuda_malloc_async_supported;
if (auto status =
cuDeviceGetAttribute(&cuda_malloc_async_supported,
CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED,
platform_device_id.value())) {
LOG(FATAL)
<< "On device: " << platform_device_id.value()
<< " Current driver: " << driverVersion
<< ". Failed to get device attribute : " << GetCudaErrorMessage(status);
}
  if (!cuda_malloc_async_supported)
    LOG(FATAL)
        << "TF_GPU_ALLOCATOR=cuda_malloc_async isn't currently supported on "
        << "GPU id " << platform_device_id.value() << "."
        << " Possible causes: device not supported (requires SM60+), driver "
           "too old, OS not supported, or CUDA version too old (requires "
           "CUDA 11.2+).";
size_t pool_size;
if (create_new_pool_) {
pool_size = new_pool_size;
CUmemPoolProps pool_props;
memset(reinterpret_cast<void*>(&pool_props), 0, sizeof(pool_props));
pool_props.allocType = CU_MEM_ALLOCATION_TYPE_PINNED;
pool_props.handleTypes = CU_MEM_HANDLE_TYPE_NONE;
pool_props.location.id = platform_device_id.value();
pool_props.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
#if CUDA_VERSION >= 12030
pool_props.maxSize = new_pool_size;
#endif
    if (auto status = cuMemPoolCreate(&pool_, &pool_props))
      LOG(FATAL) << "Failed to create CUDA pool: "
                 << GetCudaErrorMessage(status);
} else {
pool_size = reserve_memory_size;
    if (auto status =
            cuDeviceGetDefaultMemPool(&pool_, platform_device_id.value()))
      LOG(FATAL) << "Failed to get default CUDA pool: "
                 << GetCudaErrorMessage(status);
VLOG(2) << "using default memory pool " << pool_;
}
VLOG(1) << Name() << " CudaMallocAsync initialized on platform: "
<< platform_device_id.value() << " with pool size of: " << pool_size
<< " this ptr: " << this;
uint64_t release_threshold_64 = reserve_memory_size;
  if (auto status = cuMemPoolSetAttribute(
          pool_, CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, &release_threshold_64))
    LOG(FATAL) << "Failed to set CUDA pool attribute: "
               << GetCudaErrorMessage(status);
if (compute_stats) {
stats_ = std::make_unique<tsl::AllocatorStats>();
stats_->bytes_limit = static_cast<int64_t>(pool_size);
}
bool deterministic = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_DETERMINISTIC_ALLOCATOR",
false, &deterministic));
if (deterministic) {
int disable = 0;
if (auto status = cuMemPoolSetAttribute(
pool_, CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC, &disable)) {
      LOG(FATAL) << "Failed to set CUDA pool attribute: "
                 << GetCudaErrorMessage(status);
}
if (auto status = cuMemPoolSetAttribute(
pool_, CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES,
&disable)) {
      LOG(FATAL) << "Failed to set CUDA pool attribute: "
                 << GetCudaErrorMessage(status);
}
}
static auto* all_pools_ = new std::vector<CUmemoryPool*>();
static auto* all_ids_ = new std::vector<tsl::PlatformDeviceId>();
if (!create_new_pool_) {
DCHECK(all_pools_->size() == all_ids_->size());
for (int i = 0; i < all_pools_->size(); ++i) {
CUmemAccessDesc map;
map.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
map.location.id = (*all_ids_)[i].value();
map.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
VLOG(2) << "Setting access of the current pool to "
<< " location id: " << map.location.id;
int canAccessPeer;
if (auto status = cuDeviceCanAccessPeer(
&canAccessPeer, platform_device_id.value(), map.location.id)) {
pool_ = nullptr;
LOG(FATAL)
<< "cuDeviceCanAccessPeer failed to know if GPU id "
<< map.location.id << " can access GPU id "
<< platform_device_id.value() << ": "
<< GetCudaErrorMessage(status);
}
if (canAccessPeer == 1) {
if (auto status = cuMemPoolSetAccess(pool_, &map, 1)) {
pool_ = nullptr;
LOG(FATAL)
<< "Error when setting access to the pool id: " << i
<< " location id: " << map.location.id
<< " error: " << GetCudaErrorMessage(status);
}
}
map.location.id = platform_device_id.value();
VLOG(2) << "Set access to the pool id: " << i
<< " location id: " << map.location.id;
if (auto status = cuDeviceCanAccessPeer(&canAccessPeer, i,
platform_device_id.value())) {
pool_ = nullptr;
LOG(FATAL)
<< "cuDeviceCanAccessPeer failed: " << GetCudaErrorMessage(status);
}
if (canAccessPeer == 1) {
if (auto status = cuMemPoolSetAccess(*(*all_pools_)[i], &map, 1)) {
pool_ = nullptr;
LOG(FATAL)
<< "Error when setting access to the pool id: " << i
<< " location id: " << map.location.id
<< " error: " << GetCudaErrorMessage(status);
}
}
}
all_pools_->push_back(&pool_);
all_ids_->push_back(platform_device_id);
}
VLOG(2) << Name() << " GpuCudaMallocAsyncAllocator PoolSize " << pool_size;
#else
LOG(FATAL) << "GpuCudaMallocAsyncAllocator requires CUDA 11.2+";
#endif
}
GpuCudaMallocAsyncAllocator::GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId platform_device_id, size_t release_threshold,
bool reserve_memory, bool compute_stats)
: GpuCudaMallocAsyncAllocator(platform_device_id, false, 0, reserve_memory,
release_threshold, false, compute_stats) {}
GpuCudaMallocAsyncAllocator::~GpuCudaMallocAsyncAllocator() {
#if TF_CUDA_MALLOC_ASYNC_SUPPORTED
if (create_new_pool_) {
VLOG(2) << "Delete memory pool " << reinterpret_cast<void*>(pool_);
if (auto status = cuMemPoolDestroy(pool_))
LOG(FATAL) << "Failed to destroy memory pool:"
<< GetCudaErrorMessage(status);
}
#endif
}
void* GpuCudaMallocAsyncAllocator::AllocateRaw(size_t alignment,
size_t num_bytes) {
#if TF_CUDA_MALLOC_ASYNC_SUPPORTED
CHECK(cuda_stream_ != nullptr)
<< "A stream must be added to the GpuCudaMallocAsync allocator";
if (pool_ == nullptr) {
LOG(FATAL)
<< "The instantiation of GpuCudaMallocAsyncAllocator failed."
<< " See previous errors.";
}
std::unique_lock<tsl::mutex> lock(lock_, std::defer_lock);
if (stats_) {
lock.lock();
}
cuda::ScopedActivateExecutorContext scoped_activation{stream_exec_};
void* ptr = nullptr;
auto result = cuMemAllocFromPoolAsync(reinterpret_cast<CUdeviceptr*>(&ptr),
num_bytes, pool_, cuda_stream_);
  // If the pool is exhausted, synchronize the stream so that memory freed on
  // it becomes available to the pool again, then retry the allocation once.
  if (result == CUDA_ERROR_OUT_OF_MEMORY) {
    cuStreamSynchronize(cuda_stream_);
    result = cuMemAllocFromPoolAsync(reinterpret_cast<CUdeviceptr*>(&ptr),
                                     num_bytes, pool_, cuda_stream_);
  }
if (result) {
size_t free, total;
cuMemGetInfo(&free, &total);
LOG(ERROR) << Name() << " cuMemAllocAsync failed to allocate " << num_bytes
<< " bytes: " << GetCudaErrorMessage(result)
<< "\n Reported by CUDA: Free memory/Total memory: " << free
<< "/" << total;
if (stats_) {
LOG(ERROR) << "Stats: " << stats_->DebugString();
PrintAllocatorStatisticsNoLock();
}
return nullptr;
}
if (sync_mode_) {
cuStreamSynchronize(cuda_stream_);
}
if (stats_) {
++(stats_->num_allocs);
stats_->bytes_in_use += num_bytes;
if (stats_->bytes_in_use > stats_->peak_bytes_in_use) {
VLOG(9) << "New Peak memory usage of " << stats_->bytes_in_use
<< " bytes.";
}
stats_->peak_bytes_in_use =
std::max(stats_->peak_bytes_in_use, stats_->bytes_in_use);
stats_->largest_alloc_size =
std::max<std::size_t>(stats_->largest_alloc_size, num_bytes);
bool ptr_inserted = size_map_.emplace(ptr, num_bytes).second;
DCHECK(ptr_inserted);
}
VLOG(10) << Name() << " Allocated " << num_bytes << " at " << ptr;
return ptr;
#else
return nullptr;
#endif
}
void GpuCudaMallocAsyncAllocator::DeallocateRaw(void* ptr) {
#if TF_CUDA_MALLOC_ASYNC_SUPPORTED
if (ptr == nullptr) return;
std::unique_lock<tsl::mutex> lock(lock_, std::defer_lock);
if (stats_) {
lock.lock();
}
if (auto result = cuMemFreeAsync(reinterpret_cast<const CUdeviceptr&>(ptr),
cuda_stream_)) {
if (result == CUDA_ERROR_DEINITIALIZED) {
VLOG(1) << "Ignoring CUDA error: " << GetCudaErrorMessage(result);
} else {
size_t free, total;
cuda::ScopedActivateExecutorContext scoped_activation{stream_exec_};
cuMemGetInfo(&free, &total);
LOG(ERROR) << "cudaFreeAsync failed to free " << ptr << ": "
<< GetCudaErrorMessage(result)
<< "\n Free memory/Total memory: " << free << "/" << total;
if (stats_) {
LOG(ERROR) << "Stats: " << stats_->DebugString();
}
}
}
if (sync_mode_) {
cuStreamSynchronize(cuda_stream_);
}
if (stats_) {
DCHECK(size_map_.contains(ptr));
size_t size = size_map_[ptr];
stats_->bytes_in_use -= size;
size_map_.erase(ptr);
}
VLOG(10) << Name() << " Freed ptr: " << ptr;
#endif
}
bool GpuCudaMallocAsyncAllocator::TracksAllocationSizes() const {
return static_cast<bool>(stats_);
}
size_t GpuCudaMallocAsyncAllocator::RequestedSize(const void* ptr) const {
if (!stats_ || !ptr) return 0;
tsl::mutex_lock l(lock_);
return size_map_.at(ptr);
}
size_t GpuCudaMallocAsyncAllocator::AllocatedSize(const void* ptr) const {
if (!stats_ || !ptr) return 0;
tsl::mutex_lock l(lock_);
return size_map_.at(ptr);
}
std::optional<tsl::AllocatorStats> GpuCudaMallocAsyncAllocator::GetStats() {
if (!stats_) return std::nullopt;
tsl::mutex_lock l(lock_);
return *stats_;
}
bool GpuCudaMallocAsyncAllocator::ClearStats() {
if (!stats_) return false;
tsl::mutex_lock l(lock_);
stats_->num_allocs = 0;
stats_->peak_bytes_in_use = stats_->bytes_in_use;
stats_->largest_alloc_size = 0;
return true;
}
void GpuCudaMallocAsyncAllocator::SetStreamAndPreallocateMemory(void* stream) {
#if TF_CUDA_MALLOC_ASYNC_SUPPORTED
auto new_cuda_stream = static_cast<CUstream>(stream);
if (cuda_stream_ != nullptr && new_cuda_stream != cuda_stream_) {
    LOG(FATAL) << "Trying to set the stream twice. This isn't supported.";
}
uint64_t pool_size_64 = 0;
if (auto status = cuMemPoolGetAttribute(
pool_, CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, &pool_size_64)) {
    LOG(FATAL) << "Failed to get CUDA pool attribute: "
               << GetCudaErrorMessage(status);
}
cuda_stream_ = new_cuda_stream;
int64_t prealloc_size = 0;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar(
"TF_CUDA_MALLOC_ASYNC_SUPPORTED_PREALLOC", 0, &prealloc_size));
if (prealloc_size == -1) {
prealloc_size = pool_size_64;
} else if (reserve_memory_) {
prealloc_size = pool_size_64;
}
  if (prealloc_size != 0) {
    // Warm up the pool: allocate and immediately free `prealloc_size` bytes so
    // the pool retains that much memory for later allocations.
    void* ptr = AllocateRaw(0, prealloc_size);
    DeallocateRaw(ptr);
VLOG(2) << Name() << " GpuCudaMallocAsyncAllocator reserved the pool for "
<< prealloc_size << " bytes" << ". First ptr: " << ptr;
ClearStats();
}
#endif
}
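// Editorial usage sketch (not part of the original file): the typical
// lifecycle of this allocator, mirroring the accompanying unit tests.
// `executor` and `stream` are assumed to have been created already; the pool
// size, alignment, and byte counts are example values only.
//
//   GpuCudaMallocAsyncAllocator allocator(
//       tsl::PlatformDeviceId(executor->device_ordinal()),
//       /*release_threshold=*/2048,
//       /*reserve_memory=*/true,
//       /*compute_stats=*/true);
//   allocator.SetStreamAndPreallocateMemory(gpu::AsGpuStreamValue(stream));
//   void* ptr = allocator.AllocateRaw(/*alignment=*/128, /*num_bytes=*/256);
//   allocator.DeallocateRaw(ptr);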
} | #include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#ifdef GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
namespace se = stream_executor;
namespace {
static se::StreamExecutor* GpuExecutor() {
auto name = absl::AsciiStrToUpper(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
}
namespace stream_executor {
TEST(GpuCudaMallocAsyncAllocator, AddressAlignedDefaultPool) {
#if CUDA_VERSION < 11030
GTEST_SKIP() << "Cuda async memory allocator is not supported for CUDA "
"version less than 11030";
#endif
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
2048,
true,
true);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
TEST(GpuCudaMallocAsyncAllocator, AddressAlignedNewPool) {
#if CUDA_VERSION < 11030
GTEST_SKIP() << "Cuda async memory allocator is not supported for CUDA "
"version less than 11030";
#endif
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
true,
2048,
true,
0,
false,
true);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
TEST(GpuCudaMallocAsyncAllocator, SyncAddressAlignedNewPool) {
#if CUDA_VERSION < 11030
GTEST_SKIP() << "Cuda async memory allocator is not supported for CUDA "
"version less than 11030";
#endif
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
true,
2048,
true,
0,
true,
true);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
} |
1,834 | cpp | tensorflow/tensorflow | gpu_command_buffer | third_party/xla/xla/stream_executor/gpu/gpu_command_buffer.cc | third_party/xla/xla/stream_executor/gpu/gpu_command_buffer_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_GPU_COMMAND_BUFFER_H_
#define XLA_STREAM_EXECUTOR_GPU_GPU_COMMAND_BUFFER_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor::gpu {
class GpuCommandBuffer : public CommandBuffer {
public:
struct GpuGraphNodeInfo {
GpuGraphNodeHandle handle = nullptr;
};
struct GpuGraphBarrierInfo {
GpuGraphNodeHandle handle = nullptr;
bool is_barrier_node = true;
size_t nodes_offset = 0;
};
GpuCommandBuffer(Mode mode, GpuExecutor* parent, GpuGraphHandle graph,
bool is_owned_graph = true);
~GpuCommandBuffer() override;
absl::Status Barrier(ExecutionScopeId execution_scope_id) override;
absl::Status Barrier(
absl::Span<const ExecutionScopeId> execution_scope_ids) override;
absl::Status Barrier(ExecutionScopeId from_execution_scope_id,
ExecutionScopeId to_execution_scope_id) override;
absl::Status Launch(ExecutionScopeId execution_scope_id,
const ThreadDim& threads, const BlockDim& blocks,
const Kernel& kernel, const KernelArgs& args) override;
absl::Status AddNestedCommandBuffer(ExecutionScopeId execution_scope_id,
const CommandBuffer& nested) override;
absl::Status MemcpyDeviceToDevice(ExecutionScopeId execution_scope_id,
DeviceMemoryBase* dst,
const DeviceMemoryBase& src,
uint64_t size) override;
absl::Status Memset(ExecutionScopeId execution_scope_id,
DeviceMemoryBase* dst, BitPattern bit_pattern,
size_t num_elements) override;
absl::Status If(ExecutionScopeId execution_scope_id,
DeviceMemory<bool> predicate, Builder then_builder) override;
absl::Status IfElse(ExecutionScopeId execution_scope_id,
DeviceMemory<bool> predicate, Builder then_builder,
Builder else_builder) override;
absl::Status Case(ExecutionScopeId execution_scope_id,
DeviceMemory<int32_t> index,
std::vector<Builder> branches) override;
absl::Status For(ExecutionScopeId execution_scope_id, int32_t num_iteration,
DeviceMemory<int32_t> loop_counter,
Builder body_builder) override;
absl::Status While(ExecutionScopeId execution_scope_id,
DeviceMemory<bool> pred,
ExecutionScopeBuilder cond_builder,
Builder body_builder) override;
absl::Status Finalize() override;
absl::Status Update() override;
GpuGraphExecHandle executable() const { return exec_; }
GpuGraphHandle graph() const { return graph_; }
Mode mode() const override { return mode_; }
State state() const override { return state_; }
static GpuCommandBuffer* Cast(CommandBuffer* command_buffer) {
return static_cast<GpuCommandBuffer*>(command_buffer);
}
static const GpuCommandBuffer* Cast(const CommandBuffer* command_buffer) {
return static_cast<const GpuCommandBuffer*>(command_buffer);
}
absl::Span<const GpuGraphNodeInfo> nodes(ExecutionScopeId id) const;
absl::Span<const GpuGraphBarrierInfo> barriers(ExecutionScopeId id) const;
absl::Span<const GpuGraphNodeInfo> nodes() const {
return nodes(kDefaulExecutionScope);
}
absl::Span<const GpuGraphBarrierInfo> barriers() const {
return barriers(kDefaulExecutionScope);
}
private:
absl::Status Trace(Stream* stream,
absl::AnyInvocable<absl::Status()> function) override;
static int64_t AllocatedExecs();
static int64_t AliveExecs();
private:
using Dependencies = absl::InlinedVector<GpuGraphNodeHandle, 1>;
using NoOpKernel = TypedKernel<>;
using SetIfConditionKernel =
TypedKernel<GpuGraphConditionalHandle, DeviceMemory<bool>>;
using SetIfElseConditionKernel =
TypedKernel<GpuGraphConditionalHandle, GpuGraphConditionalHandle,
DeviceMemory<bool>>;
using SetCaseConditionKernel =
TypedKernel<GpuGraphConditionalHandle, GpuGraphConditionalHandle,
GpuGraphConditionalHandle, GpuGraphConditionalHandle,
GpuGraphConditionalHandle, GpuGraphConditionalHandle,
GpuGraphConditionalHandle, GpuGraphConditionalHandle,
DeviceMemory<int32_t>, int32_t>;
using SetForConditionKernel =
TypedKernel<GpuGraphConditionalHandle, DeviceMemory<int32_t>, int32_t>;
using SetWhileConditionKernel =
TypedKernel<GpuGraphConditionalHandle, DeviceMemory<bool>>;
using SetConditionFn = std::function<absl::Status(
ExecutionScopeId, absl::Span<const GpuGraphConditionalHandle>)>;
using ConditionBuilder =
std::function<absl::Status(CommandBuffer*, GpuGraphConditionalHandle)>;
static ConditionBuilder ToConditionBuilder(Builder builder);
using ConditionType = typename GpuDriver::GpuGraphConditionalNodeParams::Type;
struct ScopedGpuGraphExec {
ScopedGpuGraphExec(GpuCommandBuffer* cmd_buffer, GpuGraphExecHandle exec);
~ScopedGpuGraphExec();
GpuCommandBuffer* cmd_buffer;
GpuGraphExecHandle restore;
bool restore_is_owned;
};
struct ConditionalCommandBuffers {
std::vector<GpuGraphConditionalHandle> handles;
std::vector<std::unique_ptr<GpuCommandBuffer>> command_buffers;
};
using AllocationResult = std::pair<GpuDevicePtr, uint64_t>;
absl::StatusOr<std::vector<GpuGraphConditionalHandle>>
CreateConditionalHandles(size_t num_handles);
absl::StatusOr<std::vector<std::unique_ptr<GpuCommandBuffer>>>
CreateConditionalCommandBuffers(
absl::Span<const GpuGraphConditionalHandle> handles,
absl::Span<const GpuGraphHandle> graphs,
absl::Span<const ConditionBuilder> builders);
absl::Status UpdateConditionalCommandBuffers(
absl::Span<const GpuGraphConditionalHandle> handles,
absl::Span<const std::unique_ptr<GpuCommandBuffer>> command_buffers,
absl::Span<const ConditionBuilder> builders);
absl::StatusOr<std::vector<GpuGraphHandle>> CreateConditionalNodes(
ExecutionScopeId execution_scope_id, ConditionType type,
absl::Span<const GpuGraphConditionalHandle> handles);
absl::Status CreateConditionalCommand(
ExecutionScopeId execution_scope_id, ConditionType type,
SetConditionFn set_condition,
absl::Span<const ConditionBuilder> builders);
Dependencies GetBarrier(ExecutionScopeId execution_scope_id);
absl::StatusOr<SetIfConditionKernel*> GetSetIfConditionKernel();
absl::StatusOr<SetIfElseConditionKernel*> GetSetIfElseConditionKernel();
absl::StatusOr<SetCaseConditionKernel*> GetSetCaseConditionKernel();
absl::StatusOr<SetForConditionKernel*> GetSetForConditionKernel();
absl::StatusOr<SetWhileConditionKernel*> GetSetWhileConditionKernel();
absl::StatusOr<NoOpKernel*> GetNoOpKernel();
absl::Status DisableBarriersExecution(GpuGraphExecHandle exec);
absl::Status LaunchWithPackedArgs(
ExecutionScopeId execution_scope_id, const ThreadDim& threads,
const BlockDim& blocks, const Kernel& kernel,
const KernelArgsPackedArrayBase& packed_args);
absl::Status CheckNotFinalized();
absl::Status CheckNumCommandBuffers(
const ConditionalCommandBuffers& cmd_buffers, size_t num_cmd_buffers);
absl::StatusOr<GpuGraphNodeHandle> CreateBarrierNode(
const Dependencies& dependencies);
Dependencies GetBarrierDependencies(ExecutionScopeId execution_scope_id);
static_assert(std::is_pointer_v<GpuGraphHandle>,
"GpuGraphHandle must be a pointer");
static_assert(std::is_pointer_v<GpuGraphExecHandle>,
"GpuGraphExecHandle must be a pointer");
static_assert(std::is_pointer_v<GpuGraphNodeHandle>,
"GpuGraphNodeHandle must be a pointer");
Mode mode_;
State state_ = State::kCreate;
GpuExecutor* parent_;
GpuGraphHandle graph_ = nullptr;
bool is_owned_graph_ = true;
GpuGraphExecHandle exec_ = nullptr;
bool is_owned_graph_exec_ = true;
struct ExecutionScope {
struct UpdateState {
int64_t node_idx = 0;
int64_t barrier_idx = 0;
int64_t conditional_idx = 0;
};
std::vector<GpuGraphNodeInfo> nodes;
std::vector<GpuGraphBarrierInfo> barriers;
std::vector<ConditionalCommandBuffers> conditional_command_buffers;
UpdateState update_state;
};
absl::flat_hash_map<ExecutionScopeId, ExecutionScope> execution_scopes_;
int64_t num_updates_ = 0;
SetIfConditionKernel set_if_condition_kernel_;
SetIfElseConditionKernel set_if_else_condition_kernel_;
SetCaseConditionKernel set_case_condition_kernel_;
SetForConditionKernel set_for_condition_kernel_;
SetWhileConditionKernel set_while_condition_kernel_;
NoOpKernel noop_kernel_;
};
void* GetNoOpKernel();
std::string_view GetSetIfConditionKernel();
std::string_view GetSetIfElseConditionKernel();
std::string_view GetSetCaseConditionKernel();
std::string_view GetSetForConditionKernel();
std::string_view GetSetWhileConditionKernel();
}
#endif
#include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_kernel.h"
#include "xla/stream_executor/gpu/gpu_kernels.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
namespace stream_executor::gpu {
using Mode = CommandBuffer::Mode;
using State = CommandBuffer::State;
std::string_view to_string(State state) {
switch (state) {
case State::kCreate:
return "create";
case State::kUpdate:
return "update";
case State::kFinalized:
return "finalized";
}
}
absl::Status UnsupportedStateError(State state) {
return absl::InternalError(
absl::StrCat("Unsupported command buffer state: ", to_string(state)));
}
static std::atomic<int64_t> allocated_execs(0);
static std::atomic<int64_t> alive_execs(0);
static int64_t NotifyExecCreated() {
alive_execs.fetch_add(1, std::memory_order_relaxed);
return allocated_execs.fetch_add(1, std::memory_order_relaxed);
}
static int64_t NotifyExecDestroyed() {
DCHECK_GE(alive_execs.load(std::memory_order_relaxed), 1);
return alive_execs.fetch_sub(1, std::memory_order_relaxed) - 1;
}
int64_t GpuCommandBuffer::AllocatedExecs() {
return allocated_execs.load(std::memory_order_relaxed);
}
int64_t GpuCommandBuffer::AliveExecs() {
return alive_execs.load(std::memory_order_relaxed);
}
static std::string_view ModeToString(CommandBuffer::Mode mode) {
switch (mode) {
case CommandBuffer::Mode::kPrimary:
return "primary";
case CommandBuffer::Mode::kNested:
return "nested";
}
}
GpuCommandBuffer::GpuCommandBuffer(Mode mode, GpuExecutor* parent,
GpuGraphHandle graph, bool is_owned_graph)
: mode_(mode),
parent_(parent),
graph_(graph),
is_owned_graph_(is_owned_graph) {
VLOG(5) << "Created command buffer for graph " << graph_
<< "; mode=" << ModeToString(mode)
<< "; is_owned_graph=" << is_owned_graph_;
execution_scopes_.try_emplace(kDefaulExecutionScope);
}
GpuCommandBuffer::~GpuCommandBuffer() {
if (exec_ != nullptr && is_owned_graph_exec_) {
VLOG(5) << "Destroy GPU command buffer executable graph " << exec_ << " "
<< "(remaining alive executable graphs: " << NotifyExecDestroyed()
<< ")";
if (auto status = GpuDriver::DestroyGraphExec(exec_); !status.ok()) {
LOG(ERROR) << "Failed to destroy GPU graph exec: " << status.message();
}
}
if (graph_ != nullptr && is_owned_graph_) {
if (auto status = GpuDriver::DestroyGraph(graph_); !status.ok()) {
LOG(ERROR) << "Failed to destroy GPU graph: " << status.message();
}
}
}
GpuCommandBuffer::ScopedGpuGraphExec::ScopedGpuGraphExec(
GpuCommandBuffer* cmd_buffer, GpuGraphExecHandle exec)
: cmd_buffer(cmd_buffer),
restore(cmd_buffer->exec_),
restore_is_owned(cmd_buffer->is_owned_graph_exec_) {
cmd_buffer->exec_ = exec;
cmd_buffer->is_owned_graph_exec_ = false;
}
GpuCommandBuffer::ScopedGpuGraphExec::~ScopedGpuGraphExec() {
cmd_buffer->exec_ = restore;
cmd_buffer->is_owned_graph_exec_ = restore_is_owned;
}
static GpuDevicePtr AsDevicePtr(const DeviceMemoryBase& mem) {
return reinterpret_cast<GpuDevicePtr>(const_cast<void*>(mem.opaque()));
}
absl::Status GpuCommandBuffer::Trace(
Stream* stream, absl::AnyInvocable<absl::Status()> function) {
TF_RETURN_IF_ERROR(CheckNotFinalized());
#if defined(TENSORFLOW_USE_ROCM)
TF_ASSIGN_OR_RETURN(size_t count, GpuDriver::GraphGetNodeCount(graph_));
if (count != 0 || !is_owned_graph_)
    return absl::InternalError(
        "Stream can't be traced on non-empty command buffer");
#endif
VLOG(5) << "Trace into GPU command buffer graph " << graph_
<< " on a stream: " << stream;
auto gpu_stream = AsGpuStreamValue(stream);
uint64_t start_nanos = tsl::Env::Default()->NowNanos();
#if !defined(TENSORFLOW_USE_ROCM)
TF_RETURN_IF_ERROR(GpuDriver::StreamBeginCaptureToGraph(
gpu_stream, graph_, GpuDriver::StreamCaptureMode::kThreadLocal));
#else
TF_RETURN_IF_ERROR(GpuDriver::StreamBeginCapture(
gpu_stream, GpuDriver::StreamCaptureMode::kThreadLocal));
#endif
auto traced = function();
GpuGraphHandle captured_graph;
TF_RETURN_IF_ERROR(GpuDriver::StreamEndCapture(gpu_stream, &captured_graph));
#if !defined(TENSORFLOW_USE_ROCM)
DCHECK(captured_graph == graph_) << "Stream capture should update graph_";
#else
TF_RETURN_IF_ERROR(
GpuDriver::DestroyGraph(std::exchange(graph_, captured_graph)));
#endif
uint64_t end_nanos = tsl::Env::Default()->NowNanos();
if (!traced.ok())
return absl::InternalError(
absl::StrCat("Failed to capture gpu graph: ", traced.message()));
VLOG(5) << "Traced into the GPU command buffer graph " << graph_ << " (took "
<< (end_nanos - start_nanos) / 1000 << " μs)";
return absl::OkStatus();
}
GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrier(
ExecutionScopeId execution_scope_id) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
return execution_scope.barriers.empty()
? Dependencies{}
: Dependencies{execution_scope.barriers.back().handle};
}
absl::StatusOr<GpuCommandBuffer::SetIfConditionKernel*>
GpuCommandBuffer::GetSetIfConditionKernel() {
if (!set_if_condition_kernel_) {
MultiKernelLoaderSpec spec(2);
spec.AddCudaPtxInMemory(gpu::GetSetIfConditionKernel(), "set_if_condition");
TF_ASSIGN_OR_RETURN(
set_if_condition_kernel_,
SetIfConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_if_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetIfElseConditionKernel*>
GpuCommandBuffer::GetSetIfElseConditionKernel() {
if (!set_if_else_condition_kernel_) {
MultiKernelLoaderSpec spec(3);
spec.AddCudaPtxInMemory(gpu::GetSetIfElseConditionKernel(),
"set_if_else_condition");
TF_ASSIGN_OR_RETURN(
set_if_else_condition_kernel_,
SetIfElseConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_if_else_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetCaseConditionKernel*>
GpuCommandBuffer::GetSetCaseConditionKernel() {
if (!set_case_condition_kernel_) {
MultiKernelLoaderSpec spec(10);
spec.AddCudaPtxInMemory(gpu::GetSetCaseConditionKernel(),
"set_case_condition");
TF_ASSIGN_OR_RETURN(
set_case_condition_kernel_,
SetCaseConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_case_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetForConditionKernel*>
GpuCommandBuffer::GetSetForConditionKernel() {
if (!set_for_condition_kernel_) {
MultiKernelLoaderSpec spec(3);
spec.AddCudaPtxInMemory(gpu::GetSetForConditionKernel(),
"set_for_condition");
TF_ASSIGN_OR_RETURN(
set_for_condition_kernel_,
SetForConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_for_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetWhileConditionKernel*>
GpuCommandBuffer::GetSetWhileConditionKernel() {
if (!set_while_condition_kernel_) {
MultiKernelLoaderSpec spec(2);
spec.AddCudaPtxInMemory(gpu::GetSetWhileConditionKernel(),
"set_while_condition");
TF_ASSIGN_OR_RETURN(
set_while_condition_kernel_,
SetWhileConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_while_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::NoOpKernel*>
GpuCommandBuffer::GetNoOpKernel() {
#if !defined(TENSORFLOW_USE_ROCM)
if (!noop_kernel_) {
MultiKernelLoaderSpec spec(0);
spec.AddCudaPtxInMemory(gpu::kNoOpKernel, "noop");
TF_ASSIGN_OR_RETURN(noop_kernel_,
NoOpKernel::FactoryType::Create(parent_, spec));
}
return &noop_kernel_;
#else
return absl::UnimplementedError(
"GpuCommandBuffer::GetNoOpKernel is not implemented.");
#endif
}
absl::Status GpuCommandBuffer::DisableBarriersExecution(
GpuGraphExecHandle exec) {
#if !defined(TENSORFLOW_USE_ROCM)
ExecutionScope& execution_scope = execution_scopes_[kDefaulExecutionScope];
for (GpuGraphBarrierInfo& barrier : execution_scope.barriers) {
if (barrier.is_barrier_node) {
TF_RETURN_IF_ERROR(
GpuDriver::GraphNodeSetEnabled(exec, barrier.handle, false));
}
}
for (ConditionalCommandBuffers& cmd_buffers :
execution_scope.conditional_command_buffers) {
for (auto& cmd_buffer : cmd_buffers.command_buffers) {
TF_RETURN_IF_ERROR(cmd_buffer->DisableBarriersExecution(exec));
}
}
#endif
return absl::OkStatus();
}
absl::Status GpuCommandBuffer::CheckNotFinalized() {
if (state_ == State::kFinalized)
return absl::InternalError(
"Command can't be added to a command buffer after it was finalized");
return absl::OkStatus();
}
absl::Status GpuCommandBuffer::CheckNumCommandBuffers(
const ConditionalCommandBuffers& cmd_buffers, size_t num_cmd_buffers) {
if (cmd_buffers.handles.size() != num_cmd_buffers) {
return absl::InternalError(absl::StrCat(
"Expected to have ", num_cmd_buffers,
" conditional command buffers, got ", cmd_buffers.handles.size()));
}
return absl::OkStatus();
}
absl::StatusOr<GpuGraphNodeHandle> GpuCommandBuffer::CreateBarrierNode(
const Dependencies& dependencies) {
GpuGraphNodeHandle barrier_handle = nullptr;
#if !defined(TENSORFLOW_USE_ROCM)
TF_ASSIGN_OR_RETURN(NoOpKernel * noop, GetNoOpKernel());
TF_RETURN_IF_ERROR(GpuDriver::GraphAddKernelNode(
&barrier_handle, graph_, dependencies, "noop",
AsGpuKernel(&**noop)->AsGpuFunctionHandle(), 1, 1, 1, 1, 1, 1, 0,
nullptr, nullptr));
#else
TF_RETURN_IF_ERROR(
GpuDriver::GraphAddEmptyNode(&barrier_handle, graph_, dependencies));
#endif
return barrier_handle;
}
GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrierDependencies(
ExecutionScopeId execution_scope_id) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
auto& barriers = execution_scope.barriers;
Dependencies dependencies;
for (size_t i = barriers.empty() ? 0 : barriers.back().nodes_offset;
i < execution_scope.nodes.size(); ++i) {
dependencies.push_back(execution_scope.nodes[i].handle);
}
return dependencies;
}
absl::Status GpuCommandBuffer::Barrier(ExecutionScopeId execution_scope_id) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  if (state_ == State::kCreate) {
    size_t nodes_offset = execution_scope.nodes.size();
    Dependencies dependencies = GetBarrierDependencies(execution_scope_id);
    // No new nodes were added since the previous barrier: reuse that barrier
    // instead of creating a redundant one.
    if (dependencies.empty() && !execution_scope.barriers.empty()) {
      execution_scope.barriers.push_back({execution_scope.barriers.back()});
      return absl::OkStatus();
    }
    // Exactly one node was added since the previous barrier: that node itself
    // can serve as the barrier.
    if (dependencies.size() == 1) {
      execution_scope.barriers.push_back(
          {execution_scope.nodes.back().handle, false, nodes_offset});
      return absl::OkStatus();
    }
    // Otherwise create a dedicated barrier node depending on every node added
    // since the previous barrier (possibly none, for the first barrier in an
    // empty scope).
    TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
    execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
    return absl::OkStatus();
}
if (state_ == State::kUpdate) {
if (execution_scope.update_state.barrier_idx++ >=
execution_scope.barriers.size()) {
return absl::InternalError(
absl::StrFormat("Execution scope %d barrier index out of range",
execution_scope_id.value()));
}
return absl::OkStatus();
}
return UnsupportedStateError(state_);
}
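// Editorial recording sketch (not part of the original file): a primary
// command buffer is recorded, finalized, submitted, and later updated in
// place, mirroring the accompanying unit tests. `executor`, `stream`, the
// `add` kernel, and the device buffers a/b/c/d are assumed to exist.
//
//   auto cmd_buffer = executor->CreateCommandBuffer(Mode::kPrimary).value();
//   TF_RETURN_IF_ERROR(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, c));
//   TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
//   TF_RETURN_IF_ERROR(cmd_buffer->Finalize());
//   TF_RETURN_IF_ERROR(executor->Submit(stream, *cmd_buffer));
//   // Re-record against a new output buffer without rebuilding the graph:
//   TF_RETURN_IF_ERROR(cmd_buffer->Update());
//   TF_RETURN_IF_ERROR(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, d));
//   TF_RETURN_IF_ERROR(cmd_buffer->Finalize());
//   TF_RETURN_IF_ERROR(executor->Submit(stream, *cmd_buffer));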
absl::Status GpuCommandBuffer::Barrier(
absl::Span<const ExecutionScopeId> execution_scope_ids) {
if (execution_scope_ids.empty()) return absl::OkStatus();
if (execution_scope_ids.size() == 1) {
return Barrier(execution_scope_ids[0]);
}
for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
}
if (state_ == State::kCreate) {
Dependencies dependencies;
for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
dependencies.push_back(execution_scope.barriers.back().handle);
}
TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
size_t nodes_offset = execution_scope.nodes.size();
execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
}
return absl::OkStatus();
}
if (state_ == State::kUpdate) { | #include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/trace_command_buffer_factory.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
namespace stream_executor::gpu {
using ExecutionScopeId = CommandBuffer::ExecutionScopeId;
static Platform* GpuPlatform() {
auto name = absl::AsciiStrToUpper(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
return PlatformManager::PlatformWithName(name).value();
}
static MultiKernelLoaderSpec GetAddI32KernelSpec() {
MultiKernelLoaderSpec spec(3);
#if defined(GOOGLE_CUDA)
spec.AddCudaPtxInMemory(internal::kAddI32Kernel, "add");
#elif defined(TENSORFLOW_USE_ROCM)
spec.AddCudaCubinInMemory(internal::kAddI32KernelModule, "add");
#endif
return spec;
}
using AddI32Kernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
DeviceMemory<int32_t>>;
using MulI32Kernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
DeviceMemory<int32_t>>;
using IncAndCmpKernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<bool>, int32_t>;
using AddI32Ptrs3 = TypedKernelFactory<internal::Ptrs3<int32_t>>;
static constexpr auto nested = CommandBuffer::Mode::kNested;
static constexpr auto primary = CommandBuffer::Mode::kPrimary;
template <typename Info>
static std::vector<GpuGraphNodeHandle> Deps(Info info) {
if (auto deps = GpuDriver::GraphNodeGetDependencies(info.handle); deps.ok()) {
return *deps;
}
return {GpuGraphNodeHandle(0xDEADBEEF)};
}
template <typename... Infos>
static std::vector<GpuGraphNodeHandle> ExpectedDeps(Infos... info) {
return {info.handle...};
}
static bool IsAtLeastCuda12300() {
#if defined(TENSORFLOW_USE_ROCM)
return false;
#endif
#if CUDA_VERSION >= 12030
return true;
#endif
return false;
}
TEST(GpuCommandBufferTest, LaunchSingleKernel) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "add");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, c));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, d));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(CudaCommandBufferTest, TraceSingleKernel) {
#if defined(TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Not supported on ROCM";
#endif
#if CUDA_VERSION < 12030
GTEST_SKIP() << "Command buffer tracing is not supported";
#endif
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(1, [&](const Kernel& kernel,
const KernelArgs& args) {
auto bufs = Cast<KernelArgsDeviceMemoryArray>(&args)->device_memory_args();
auto cast = [](auto m) { return reinterpret_cast<int32_t*>(m.opaque()); };
return PackKernelArgs(0, internal::Ptrs3<int32_t>{
cast(bufs[0]),
cast(bufs[1]),
cast(bufs[2]),
});
});
spec.AddInProcessSymbol(internal::GetAddI32Ptrs3Kernel(), "add");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Ptrs3::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
KernelArgsDeviceMemoryArray args({a, b, c}, 0);
auto cmd_buffer = TraceCommandBufferFactory::Create(
executor,
[&](Stream* stream) {
return stream->Launch(ThreadDim(), BlockDim(4), *add, args);
},
primary);
TF_ASSERT_OK(cmd_buffer.status());
TF_ASSERT_OK(executor->Submit(stream.get(), **cmd_buffer));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, LaunchNestedCommandBuffer) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec = GetAddI32KernelSpec();
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
auto primary_cmd = executor->CreateCommandBuffer(primary).value();
auto nested_cmd = executor->CreateCommandBuffer(nested).value();
TF_ASSERT_OK(nested_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c));
TF_ASSERT_OK(primary_cmd->AddNestedCommandBuffer(*nested_cmd));
TF_ASSERT_OK(primary_cmd->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *primary_cmd));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
nested_cmd = executor->CreateCommandBuffer(nested).value();
TF_ASSERT_OK(nested_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, d));
TF_ASSERT_OK(primary_cmd->Update());
TF_ASSERT_OK(primary_cmd->AddNestedCommandBuffer(*nested_cmd));
TF_ASSERT_OK(primary_cmd->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *primary_cmd));
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, MemcpyDeviceToDevice) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->MemcpyDeviceToDevice(&b, a, byte_length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
std::vector<int32_t> expected = {42, 42, 42, 42};
ASSERT_EQ(dst, expected);
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->MemcpyDeviceToDevice(&a, b, byte_length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(stream->Memset32(&a, 0, byte_length));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, Memset) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->Memset(&a, uint32_t{42}, length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
std::vector<int32_t> expected = {42, 42, 42, 42};
ASSERT_EQ(dst, expected);
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->Memset(&a, uint32_t{43}, length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
expected = {43, 43, 43, 43};
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, Barriers) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 6; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[4], bit_pattern + 4, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[5], bit_pattern + 5, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
ASSERT_EQ(gpu_cmd_buffer->nodes().size(), 6);
ASSERT_EQ(gpu_cmd_buffer->barriers().size(), 6);
auto nodes = gpu_cmd_buffer->nodes();
auto barriers = gpu_cmd_buffer->barriers();
EXPECT_TRUE(barriers[0].is_barrier_node);
EXPECT_TRUE(Deps(barriers[0]).empty());
EXPECT_FALSE(barriers[1].is_barrier_node);
EXPECT_EQ(barriers[1].handle, nodes[0].handle);
EXPECT_FALSE(barriers[2].is_barrier_node);
EXPECT_FALSE(barriers[3].is_barrier_node);
EXPECT_EQ(barriers[2].handle, nodes[1].handle);
EXPECT_EQ(barriers[3].handle, nodes[1].handle);
EXPECT_TRUE(barriers[4].is_barrier_node);
EXPECT_TRUE(barriers[5].is_barrier_node);
EXPECT_EQ(Deps(barriers[4]), ExpectedDeps(nodes[2], nodes[3]));
EXPECT_EQ(Deps(barriers[5]), ExpectedDeps(nodes[4], nodes[5]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
expected = {43, 44, 45, 46, 47, 48};
ASSERT_EQ(transfer_buffers(), expected);
}
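// Records memsets into two separate execution scopes, each terminated by its
// own barrier, and checks that the scopes build independent node chains with
// per-scope barrier nodes.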
TEST(GpuCommandBufferTest, IndependentExecutionScopes) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 4; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s1));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> expected = {42, 43, 44, 45};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
ASSERT_EQ(nodes0.size(), 2);
ASSERT_EQ(nodes1.size(), 2);
ASSERT_EQ(barriers0.size(), 1);
ASSERT_EQ(barriers1.size(), 1);
EXPECT_TRUE(barriers0[0].is_barrier_node);
EXPECT_TRUE(barriers1[0].is_barrier_node);
EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
expected = {43, 44, 45, 46};
ASSERT_EQ(transfer_buffers(), expected);
}
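// Verifies a barrier that spans several execution scopes: the scopes share a
// joint barrier node that depends on each scope's own barrier (including the
// barrier of a scope with no prior commands), and commands recorded afterwards
// depend on that joint barrier.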
TEST(GpuCommandBufferTest, ExecutionScopeBarriers) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
CommandBuffer::ExecutionScopeId s2 = CommandBuffer::ExecutionScopeId(2);
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 7; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier({s0, s1, s2}));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[4], bit_pattern + 4, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[5], bit_pattern + 5, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s2, &buffers[6], bit_pattern + 6, 1));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47, 48};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto nodes2 = gpu_cmd_buffer->nodes(s2);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
auto barriers2 = gpu_cmd_buffer->barriers(s2);
ASSERT_EQ(nodes0.size(), 3);
ASSERT_EQ(nodes1.size(), 3);
ASSERT_EQ(nodes2.size(), 1);
ASSERT_EQ(barriers0.size(), 2);
ASSERT_EQ(barriers1.size(), 2);
ASSERT_EQ(barriers2.size(), 2);
EXPECT_TRUE(barriers0[0].is_barrier_node && barriers0[1].is_barrier_node);
EXPECT_TRUE(barriers1[0].is_barrier_node && barriers1[1].is_barrier_node);
EXPECT_TRUE(barriers2[0].is_barrier_node && barriers2[1].is_barrier_node);
EXPECT_TRUE(barriers0[1].handle == barriers1[1].handle);
EXPECT_TRUE(barriers1[1].handle == barriers2[1].handle);
EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
EXPECT_TRUE(Deps(barriers2[0]).empty());
EXPECT_EQ(Deps(barriers2[1]),
ExpectedDeps(barriers0[0], barriers1[0], barriers2[0]));
EXPECT_EQ(Deps(nodes0[2]), ExpectedDeps(barriers0[1]));
EXPECT_EQ(Deps(nodes1[2]), ExpectedDeps(barriers1[1]));
EXPECT_EQ(Deps(nodes2[0]), ExpectedDeps(barriers2[1]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
expected = {43, 44, 45, 46, 47, 48, 49};
ASSERT_EQ(transfer_buffers(), expected);
}
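// Verifies the one-directional barrier Barrier(s0, s1): only the destination
// scope gains an extra barrier node depending on both scopes' barriers, while
// commands recorded into the source scope keep depending on its own barrier.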
TEST(GpuCommandBufferTest, ExecutionScopeOneDirectionalBarriers) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 6; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0, s1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[4], bit_pattern + 4, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[5], bit_pattern + 5, 1));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
ASSERT_EQ(nodes0.size(), 3);
ASSERT_EQ(nodes1.size(), 3);
ASSERT_EQ(barriers0.size(), 1);
ASSERT_EQ(barriers1.size(), 2);
EXPECT_TRUE(barriers0[0].is_barrier_node);
EXPECT_TRUE(barriers1[0].is_barrier_node && barriers1[1].is_barrier_node);
EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
EXPECT_EQ(Deps(barriers1[1]), ExpectedDeps(barriers0[0], barriers1[0]));
EXPECT_EQ(Deps(nodes0[2]), ExpectedDeps(barriers0[0]));
EXPECT_EQ(Deps(nodes1[2]), ExpectedDeps(barriers1[1]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
expected = {43, 44, 45, 46, 47, 48};
ASSERT_EQ(transfer_buffers(), expected);
}
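// Builds a command buffer with a conditional If node: the `then` branch runs
// only while the predicate is true, and the branch body can be replaced via a
// command buffer update. Skipped when CUDA graph conditionals are unavailable.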
TEST(GpuCommandBufferTest, ConditionalIf) {
if (!IsAtLeastCuda12300()) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "add");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
constexpr bool kTrue = true;
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
constexpr bool kFalse = false;
TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> zeroes = {0, 0, 0, 0};
ASSERT_EQ(dst, zeroes);
DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, d);
};
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, expected);
}
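// Same structure as the If test, but with an IfElse node: the predicate
// selects between an `add` and a `mul` kernel launch recorded into the two
// branches.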
TEST(GpuCommandBufferTest, ConditionalIfElse) {
if (!IsAtLeastCuda12300()) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec add_spec(3);
add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "add");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
MultiKernelLoaderSpec mul_spec(3);
mul_spec.AddInProcessSymbol(internal::GetMulI32Kernel(), "mul");
TF_ASSERT_OK_AND_ASSIGN(auto mul, MulI32Kernel::Create(executor, mul_spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
constexpr bool kTrue = true;
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
TF_ASSERT_OK(stream->Memset32(&a, 2, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 3, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
};
CommandBuffer::Builder else_builder = [&](CommandBuffer* else_cmd) {
return else_cmd->Launch(mul, ThreadDim(), BlockDim(4), a, b, c);
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->IfElse(pred, then_builder, else_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected_add = {5, 5, 5, 5};
ASSERT_EQ(dst, expected_add);
constexpr bool kFalse = false;
TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
TF_ASSERT_OK(executor->Submit(stream.get(), *cmd_buffer));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected_mul = {6, 6, 6, 6};
ASSERT_EQ(dst, expected_mul);
DeviceMemory<int32_t> d = executor->All |
1,835 | cpp | tensorflow/tensorflow | scatter_expander | third_party/xla/xla/service/gpu/transforms/scatter_expander.cc | third_party/xla/xla/service/scatter_expander_test.cc | #ifndef XLA_SERVICE_SCATTER_EXPANDER_H_
#define XLA_SERVICE_SCATTER_EXPANDER_H_
#include "xla/service/op_expander_pass.h"
namespace xla {
class ScatterExpander : public OpExpanderPass {
public:
enum Mode {
kEliminateAllScatters,
kEliminateSimpleScatters,
kEliminateIndeterministicScatters,
};
explicit ScatterExpander(Mode m) : mode_(m) {}
absl::string_view name() const override { return "scatter_expander"; }
protected:
bool InstructionMatchesPattern(HloInstruction* inst) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* inst) override;
private:
Mode mode_;
};
}
#endif
#include "xla/service/scatter_expander.h"
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/while_util.h"
namespace xla {
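// Transposes scatter_indices so that index_vector_dim becomes the most minor
// dimension; returns the operand unchanged when the index vector dimension is
// already last or is implicit (equal to the rank).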
static absl::StatusOr<HloInstruction*> TransposeIndexVectorDimToLast(
HloInstruction* scatter_indices, int64_t index_vector_dim) {
const Shape& scatter_indices_shape = scatter_indices->shape();
if (scatter_indices_shape.dimensions_size() == index_vector_dim) {
return scatter_indices;
}
if (index_vector_dim == (scatter_indices_shape.dimensions_size() - 1)) {
return scatter_indices;
}
std::vector<int64_t> permutation;
permutation.reserve(scatter_indices_shape.dimensions_size());
for (int64_t i = 0, e = scatter_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
permutation.push_back(i);
}
}
permutation.push_back(index_vector_dim);
return MakeTransposeHlo(scatter_indices, permutation);
}
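// Canonicalizes scatter_indices by collapsing all leading scatter dimensions
// into one, yielding a [num_index_vectors, index_vector_size] tensor (or a
// rank-1 tensor when indices are scalar).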
static absl::StatusOr<HloInstruction*> CanonicalizeScatterIndices(
HloInstruction* scatter_indices, int64_t index_vector_dim) {
TF_ASSIGN_OR_RETURN(
HloInstruction * transposed_scatter_indices,
TransposeIndexVectorDimToLast(scatter_indices, index_vector_dim));
if (scatter_indices->shape().rank() == index_vector_dim + 1 &&
scatter_indices->shape().dimensions(index_vector_dim) == 1) {
auto new_shape =
ShapeUtil::DeleteDimension(index_vector_dim, scatter_indices->shape());
TF_ASSIGN_OR_RETURN(scatter_indices,
MakeReshapeHlo(new_shape, scatter_indices));
}
bool indices_are_scalar =
index_vector_dim == scatter_indices->shape().dimensions_size();
const int64_t index_dims_in_scatter_indices = indices_are_scalar ? 0 : 1;
const Shape& shape = transposed_scatter_indices->shape();
if (shape.dimensions_size() == index_dims_in_scatter_indices) {
return PrependDegenerateDims(transposed_scatter_indices, 1);
} else {
return CollapseFirstNDims(
transposed_scatter_indices,
shape.dimensions_size() - index_dims_in_scatter_indices);
}
}
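// Permutes the update tensor so that all scatter dimensions come first,
// followed by the update window dimensions, preserving their relative order.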
static absl::StatusOr<HloInstruction*> PermuteScatterAndWindowDims(
HloInstruction* updates, absl::Span<const int64_t> update_window_dims) {
std::vector<int64_t> permutation;
const int64_t updates_rank = updates->shape().rank();
permutation.reserve(updates_rank);
for (int64_t i = 0; i < updates_rank; ++i) {
bool is_scatter_dim = !absl::c_binary_search(update_window_dims, i);
if (is_scatter_dim) {
permutation.push_back(i);
}
}
for (auto window_dim : update_window_dims) {
permutation.push_back(window_dim);
}
return MakeTransposeHlo(updates, permutation);
}
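// Collapses the leading scatter dimensions of a canonicalized update into a
// single dimension (or prepends a degenerate one if there are none), so its
// major dimension lines up with the canonicalized scatter indices.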
static absl::StatusOr<HloInstruction*> AdjustScatterDims(
const Shape& scatter_indices_shape, HloInstruction* updates,
int64_t index_vector_dim) {
int64_t num_scatter_dims = scatter_indices_shape.dimensions_size();
if (index_vector_dim < scatter_indices_shape.dimensions_size()) {
--num_scatter_dims;
}
if (num_scatter_dims == 0) {
return PrependDegenerateDims(updates, 1);
}
return CollapseFirstNDims(updates, num_scatter_dims);
}
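// Expands an index vector from the scatter_indices tensor into a vector in the
// operand's index space, inserting zeros for operand dimensions that are not
// covered by scatter_dims_to_operand_dims.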
static absl::StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace(
HloInstruction* index_vector, const ScatterDimensionNumbers& dim_numbers,
int64_t operand_rank) {
HloComputation* computation = index_vector->parent();
const Shape& index_shape = index_vector->shape();
if (operand_rank == 0) {
return computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {0})));
}
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1})));
std::vector<HloInstruction*> expanded_index_components;
for (int i = 0; i < operand_rank; i++) {
int64_t index_vector_dim_index =
FindIndex(dim_numbers.scatter_dims_to_operand_dims(), i);
if (index_vector_dim_index !=
dim_numbers.scatter_dims_to_operand_dims_size()) {
TF_ASSIGN_OR_RETURN(
HloInstruction * component_to_concat,
MakeSliceHlo(index_vector, {index_vector_dim_index},
{index_vector_dim_index + 1},
{1}));
expanded_index_components.push_back(component_to_concat);
} else {
expanded_index_components.push_back(zero);
}
}
return MakeConcatHlo(expanded_index_components, 0);
}
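// Returns a boolean tensor, broadcast to the update window shape, that is true
// iff the scatter start index is in bounds for every operand dimension, i.e.
// 0 <= index and index <= operand_dim - window_size.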
static absl::StatusOr<HloInstruction*> CheckIndexValidity(
HloComputation* computation, HloInstruction* index,
absl::Span<const int64_t> operand_dims,
absl::Span<const int64_t> window_sizes, HloModule* module) {
DCHECK_NE(nullptr, module);
DCHECK_EQ(operand_dims.size(), window_sizes.size());
HloInstruction* zero_index = BroadcastZeros(
computation, index->shape().element_type(), index->shape().dimensions());
TF_ASSIGN_OR_RETURN(
HloInstruction * negative_index_check,
MakeCompareHlo(ComparisonDirection::kLe, zero_index, index));
std::vector<int64_t> max_valid_index(operand_dims.size());
for (int i = 0; i < operand_dims.size(); ++i) {
max_valid_index[i] = operand_dims[i] - window_sizes[i];
}
TF_ASSIGN_OR_RETURN(
HloInstruction * max_valid_index_constant,
MakeR1ConstantHlo<int64_t>(computation, index->shape().element_type(),
max_valid_index));
TF_ASSIGN_OR_RETURN(HloInstruction * oob_index_check,
MakeCompareHlo(ComparisonDirection::kGe,
max_valid_index_constant, index));
TF_ASSIGN_OR_RETURN(
HloInstruction * valid_index,
MakeBinaryHlo(HloOpcode::kAnd, negative_index_check, oob_index_check));
auto reduction_init = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
TF_ASSIGN_OR_RETURN(
HloInstruction * valid_index_reduced,
MakeReduceHlo(valid_index, reduction_init, HloOpcode::kAnd, module));
return MakeBroadcastHlo(valid_index_reduced, {}, window_sizes);
}
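// Returns a computation equivalent to `original` but producing only the
// `output_index`-th element of its tuple-shaped root; the call to the original
// combiner is inlined into the new computation. Non-tuple roots are returned
// unchanged.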
static absl::StatusOr<HloComputation*> CallAndGetOutput(
HloComputation* original, int output_index) {
HloInstruction* original_root = original->root_instruction();
if (!original_root->shape().IsTuple()) {
return original;
}
HloComputation* new_comp = [&] {
HloComputation::Builder builder(
absl::StrCat(original->name(), ".dup.", output_index));
for (int i = 0, n = original->num_parameters(); i < n; ++i) {
HloInstruction* original_param = original->parameter_instruction(i);
builder.AddInstruction(HloInstruction::CreateParameter(
i, original_param->shape(), original_param->name()));
}
return original->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* call_original = new_comp->AddInstruction(
HloInstruction::CreateCall(original_root->shape(),
new_comp->parameter_instructions(), original));
new_comp->set_root_instruction(
new_comp->AddInstruction(
HloInstruction::CreateGetTupleElement(call_original, output_index)),
true);
TF_RETURN_IF_ERROR(CallInliner::Inline(call_original).status());
return new_comp;
}
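// Body of the scatter expansion loop. For each induction value it slices one
// index vector and the matching update slices, maps them through the combiner
// together with the current operand slices, masks out updates whose indices
// are out of bounds, and writes the result back with dynamic-update-slice.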
static absl::StatusOr<std::vector<HloInstruction*>> ScatterLoopBody(
HloScatterInstruction* scatter, HloInstruction* induction_var,
absl::Span<HloInstruction* const> loop_state) {
const ScatterDimensionNumbers& dim_numbers =
scatter->scatter_dimension_numbers();
CHECK_EQ(loop_state.size(), scatter->operand_count());
auto operands = loop_state.first(scatter->scatter_operand_count());
HloInstruction* scatter_indices = loop_state[operands.size()];
auto updates = loop_state.last(operands.size());
bool has_scalar_indices = scatter_indices->shape().dimensions_size() == 1;
HloInstruction* induction_var_as_vector =
MakeBroadcastHlo(induction_var, {},
{1});
HloInstruction* index_vector;
if (has_scalar_indices) {
TF_ASSIGN_OR_RETURN(
index_vector,
MakeDynamicSliceHlo(scatter_indices, induction_var_as_vector, {1}));
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * index_into_scatter_indices,
PadVectorWithZeros(induction_var_as_vector,
0, 1));
int index_vector_size = scatter_indices->shape().dimensions(1);
TF_ASSIGN_OR_RETURN(
HloInstruction * index_vector_2d,
MakeDynamicSliceHlo(scatter_indices, index_into_scatter_indices,
{1, index_vector_size}));
TF_ASSIGN_OR_RETURN(index_vector,
ElideDegenerateDims(index_vector_2d, {0}));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * scatter_slice_start,
ExpandIndexVectorIntoOperandSpace(
index_vector, dim_numbers, operands[0]->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(
HloInstruction * index_into_updates,
PadVectorWithZeros(
induction_var_as_vector, 0,
updates[0]->shape().dimensions_size() - 1));
std::vector<int64_t> update_slice_bounds(
updates[0]->shape().dimensions().begin(),
updates[0]->shape().dimensions().end());
update_slice_bounds[0] = 1;
absl::InlinedVector<HloInstruction*, 2> map_operands(
operands.size() + updates.size(), nullptr);
auto operand_slices_to_update =
absl::MakeSpan(map_operands).first(operands.size());
auto update_slices_with_dims_inserted =
absl::MakeSpan(map_operands).last(updates.size());
absl::Span<const int64_t> actual_update_slice_dims;
for (int i = 0, n = operands.size(); i < n; ++i) {
HloInstruction* update = updates[i];
TF_ASSIGN_OR_RETURN(
HloInstruction * update_slice,
MakeDynamicSliceHlo(update, index_into_updates, update_slice_bounds));
TF_ASSIGN_OR_RETURN(HloInstruction * update_slice_for_scatter,
ElideDegenerateDims(update_slice, {0}));
TF_ASSIGN_OR_RETURN(
HloInstruction * update_slice_with_dims_inserted,
InsertDegenerateDims(update_slice_for_scatter,
dim_numbers.inserted_window_dims()));
update_slices_with_dims_inserted[i] = update_slice_with_dims_inserted;
HloInstruction* operand = operands[i];
const Shape& update_slice_shape = update_slice_with_dims_inserted->shape();
TF_ASSIGN_OR_RETURN(HloInstruction * operand_slice_to_update,
MakeDynamicSliceHlo(operand, scatter_slice_start,
update_slice_shape.dimensions()));
operand_slices_to_update[i] = operand_slice_to_update;
if (i == 0) {
actual_update_slice_dims = update_slice_shape.dimensions();
} else {
TF_RET_CHECK(actual_update_slice_dims == update_slice_shape.dimensions());
}
}
TF_ASSIGN_OR_RETURN(
HloInstruction * is_index_valid,
CheckIndexValidity(operands[0]->parent(), scatter_slice_start,
operands[0]->shape().dimensions(),
actual_update_slice_dims, scatter->GetModule()));
std::vector<HloInstruction*> updated_loop_state;
updated_loop_state.reserve(loop_state.size());
for (int i = 0, n = operands.size(); i < n; ++i) {
TF_ASSIGN_OR_RETURN(HloComputation * to_apply,
CallAndGetOutput(scatter->to_apply(), i));
TF_ASSIGN_OR_RETURN(HloInstruction * updated_operand_slice,
MakeMapHlo(map_operands, to_apply));
TF_ASSIGN_OR_RETURN(HloInstruction * updates_to_apply,
MakeSelectHlo(is_index_valid, updated_operand_slice,
operand_slices_to_update[i]));
TF_ASSIGN_OR_RETURN(HloInstruction * updated_operand,
MakeDynamicUpdateSliceHlo(operands[i], updates_to_apply,
scatter_slice_start));
updated_loop_state.push_back(updated_operand);
}
updated_loop_state.push_back(scatter_indices);
absl::c_copy(updates, std::back_inserter(updated_loop_state));
return updated_loop_state;
}
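// Trip count of the expansion loop: the number of scatter index vectors, i.e.
// the product of all scatter_indices dimensions except index_vector_dim.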
static int64_t ScatterTripCount(const HloScatterInstruction* scatter) {
const HloInstruction* scatter_indices = scatter->scatter_indices();
const Shape& scatter_indices_shape = scatter_indices->shape();
const ScatterDimensionNumbers& dim_numbers =
scatter->scatter_dimension_numbers();
int64_t scatter_loop_trip_count = 1;
for (int64_t i = 0, e = scatter_indices_shape.dimensions_size(); i < e; i++) {
if (i != dim_numbers.index_vector_dim()) {
scatter_loop_trip_count *= scatter_indices_shape.dimensions(i);
}
}
return scatter_loop_trip_count;
}
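// Lowers a scatter into a counted while loop that applies one index vector per
// iteration: canonicalize the indices and updates, run ScatterLoopBody for
// each index, and return the updated operands (as a tuple for multi-output
// scatters). Scatters with more than INT32_MAX indices are rejected as
// unimplemented.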
absl::StatusOr<HloInstruction*> ScatterExpander::ExpandInstruction(
HloInstruction* inst) {
auto* scatter = Cast<HloScatterInstruction>(inst);
auto scatter_operands = scatter->scatter_operands();
HloInstruction* scatter_indices = scatter->scatter_indices();
auto scatter_updates = scatter->scatter_updates();
const ScatterDimensionNumbers& dim_numbers =
scatter->scatter_dimension_numbers();
if (ShapeUtil::IsZeroElementArray(scatter_updates[0]->shape())) {
if (scatter_operands.size() == 1) {
return scatter_operands[0];
}
return scatter->parent()->AddInstruction(
HloInstruction::CreateTuple(scatter_operands));
}
int64_t scatter_loop_trip_count = ScatterTripCount(scatter);
if (!IsInt32(scatter_loop_trip_count)) {
return Unimplemented(
"Scatter operations with more than 2147483647 scatter indices are not "
"supported. This error occurred for %s.",
scatter->ToString());
}
TF_ASSIGN_OR_RETURN(HloInstruction * canonical_scatter_indices,
CanonicalizeScatterIndices(
scatter_indices, dim_numbers.index_vector_dim()));
CHECK_EQ(scatter_loop_trip_count,
canonical_scatter_indices->shape().dimensions(0));
std::vector<HloInstruction*> adjusted_canonical_updates;
adjusted_canonical_updates.reserve(scatter_updates.size());
for (HloInstruction* update : scatter_updates) {
TF_ASSIGN_OR_RETURN(
HloInstruction * canonical_update,
PermuteScatterAndWindowDims(update, dim_numbers.update_window_dims()));
TF_ASSIGN_OR_RETURN(
HloInstruction * adjusted_canonical_update,
AdjustScatterDims(scatter_indices->shape(), canonical_update,
dim_numbers.index_vector_dim()));
CHECK_EQ(scatter_loop_trip_count,
adjusted_canonical_update->shape().dimensions(0));
adjusted_canonical_updates.push_back(adjusted_canonical_update);
}
std::vector<HloInstruction*> loop_state;
loop_state.reserve(scatter->operand_count());
absl::c_copy(scatter_operands, std::back_inserter(loop_state));
loop_state.push_back(canonical_scatter_indices);
absl::c_copy(adjusted_canonical_updates, std::back_inserter(loop_state));
absl::StatusOr<std::vector<HloInstruction*>> scatter_loop_result_status =
WhileUtil::MakeCountedLoop(
scatter->parent(), scatter_loop_trip_count, loop_state,
[scatter](HloInstruction* induction_var,
const std::vector<HloInstruction*>& loop_state) {
return ScatterLoopBody(scatter, induction_var, loop_state);
},
scatter->metadata());
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> scatter_loop_result,
scatter_loop_result_status);
auto results =
absl::MakeSpan(scatter_loop_result).first(scatter_operands.size());
return MaybeMakeTuple(results);
}
namespace {
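// A combiner is treated as associative if it is a single min/max, or an
// integer add/multiply/or/xor; floating-point add and multiply are excluded
// because reassociating them can change results.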
bool IsCombinerAssociative(const HloComputation* combiner) {
if (combiner->instruction_count() != 3) {
return false;
}
switch (combiner->root_instruction()->opcode()) {
case HloOpcode::kMinimum:
case HloOpcode::kMaximum:
return true;
case HloOpcode::kAdd:
case HloOpcode::kMultiply:
case HloOpcode::kOr:
case HloOpcode::kXor:
return combiner->root_instruction()->shape().IsInteger();
default:
return false;
}
}
bool IsDeterministic(const HloScatterInstruction* scatter) {
if (scatter->unique_indices()) {
return true;
}
if (IsCombinerAssociative(scatter->to_apply())) {
return true;
}
return false;
}
}
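// A scatter is expanded when it matches the configured mode: all scatters,
// only trivial (single-index) scatters, or only scatters whose result could be
// nondeterministic (non-unique indices combined non-associatively).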
bool ScatterExpander::InstructionMatchesPattern(HloInstruction* inst) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return (scatter != nullptr) && (mode_ == kEliminateAllScatters ||
(mode_ == kEliminateSimpleScatters &&
ScatterTripCount(scatter) == 1) ||
(mode_ == kEliminateIndeterministicScatters &&
!IsDeterministic(scatter)));
}
} | #include "xla/service/scatter_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class ScatterExpanderTest : public HloTestBase {
protected:
void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {
HloInstruction* inst = FindInstruction(module, inst_name);
inst->mutable_shape()->clear_layout();
}
};
TEST_F(ScatterExpanderTest, ScatterOperandWithoutLayout) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update = s32[] constant(0)
ROOT scatter = s32[5]{0} scatter(operand, indices, update),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, ScatterMultipleOperandsWithoutLayout) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[5] iota(), iota_dimension=0
operand1 = f32[5] constant({2,4,6,8,10})
indices = s32[1] parameter(0)
update0 = s32[] constant(0)
update1 = f32[] constant(1)
ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, EliminateSimpleScattersSkipsNontrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest,
EliminateSimpleMultioutputScattersSkipsNontrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[3,3] parameter(0)
operand1 = bf16[3,3] parameter(1)
indices = s32[2] parameter(2)
update0 = s32[2,3] parameter(3)
update1 = bf16[2,3] parameter(4)
ROOT scatter = (s32[3,3], bf16[3,3]) scatter(operand0, operand1, indices, update0, update1),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest, EliminateSimpleScattersRewritesTrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update = s32[] constant(0)
ROOT scatter = s32[5]{0} scatter(operand, indices, update),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest,
EliminateSimpleMultioutputScattersRewritesTrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[5] iota(), iota_dimension=0
operand1 = f32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update0 = s32[] constant(0)
update1 = f32[] constant(0)
ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeCombiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = s32[] parameter(1)
arg0.172 = s32[] parameter(0)
ROOT add.48 = s32[] add(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = s32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = s32[4096,1,1] parameter(2)
ROOT scatter.48 = s32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest, EliminateScatterWithNonAssociativeCombiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = f32[] parameter(1)
arg0.172 = f32[] parameter(0)
ROOT add.48 = f32[] add(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = f32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = f32[4096,1,1] parameter(2)
ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeFp32Combiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = f32[] parameter(1)
arg0.172 = f32[] parameter(0)
ROOT max.48 = f32[] maximum(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = f32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = f32[4096,1,1] parameter(2)
ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
}
} |
1,836 | cpp | tensorflow/tensorflow | reduce_scatter_decomposer | third_party/xla/xla/service/reduce_scatter_decomposer.cc | third_party/xla/xla/service/reduce_scatter_decomposer_test.cc | #ifndef XLA_SERVICE_REDUCE_SCATTER_DECOMPOSER_H_
#define XLA_SERVICE_REDUCE_SCATTER_DECOMPOSER_H_
#include <functional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
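// Pass that decomposes a reduce-scatter into an all-reduce followed by a
// dynamic-slice selecting this participant's shard. Optional callbacks let
// callers adjust the layout of created shapes and restrict which ops are
// decomposed.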
class ReduceScatterDecomposer : public HloModulePass {
public:
explicit ReduceScatterDecomposer(
std::function<void(Shape&)> update_layout = nullptr,
std::function<bool(const HloInstruction*)> should_decompose = nullptr)
: update_layout_(update_layout), should_decompose_(should_decompose) {}
absl::string_view name() const override {
return "reduce-scatter-decomposer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
std::function<void(Shape&)> update_layout_;
std::function<bool(const HloInstruction*)> should_decompose_;
};
}
#endif
#include "xla/service/reduce_scatter_decomposer.h"
#include <sys/types.h>
#include <limits>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
namespace xla {
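// For every array-shaped reduce-scatter, emit an equivalent all-reduce over
// the full operand (using a clone of the combiner) and a dynamic-slice whose
// start index along the scatter dimension is derived from the replica or
// partition id according to the collective group mode.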
absl::StatusOr<bool> ReduceScatterDecomposer::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
bool changed = false;
int64_t next_channel_id = hlo_query::NextChannelId(*module);
for (HloComputation *computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction *instruction :
computation->MakeInstructionPostOrder()) {
auto *rs = DynCast<HloReduceScatterInstruction>(instruction);
if (!rs || !rs->shape().IsArray()) {
continue;
}
std::optional<int64_t> channel_id;
if (rs->channel_id()) {
channel_id = next_channel_id++;
}
if (should_decompose_ && !should_decompose_(rs)) {
continue;
}
VLOG(2) << "Decompose: " << rs->ToString();
HloComputation *apply_clone = module->AddComputationAndUnifyNamesAndIds(
rs->to_apply()->Clone(), false);
HloInstruction *ar =
computation->AddInstruction(HloInstruction::CreateAllReduce(
rs->operand(0)->shape(), rs->operands(), apply_clone,
rs->device_list(), rs->constrain_layout(), channel_id,
rs->use_global_device_ids()));
apply_clone->SetCollectiveCallInstruction(ar);
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(rs->channel_id().has_value(),
rs->use_global_device_ids()));
TF_ASSIGN_OR_RETURN(
std::vector<HloInstruction *> start_indices,
CreateStartIndicesForCollectiveDecomposition(
group_mode, rs->replica_groups(), rs->shape(),
rs->scatter_dimension(), computation, update_layout_));
HloInstruction *ds =
computation->AddInstruction(HloInstruction::CreateDynamicSlice(
rs->shape(), ar, start_indices, rs->shape().dimensions()));
TF_RETURN_IF_ERROR(rs->ReplaceAllUsesWith(ds));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs));
changed = true;
}
}
return changed;
}
} | #include "xla/service/reduce_scatter_decomposer.h"
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ReduceScatterDecomposerTest : public HloTestBase {
public:
enum class PassAction {
kNoChange,
kTrivialGroups,
kTableLookup,
};
void RunPass(
absl::string_view hlo_module, PassAction action,
CollectiveOpGroupMode mode = CollectiveOpGroupMode::kCrossReplica,
int64_t shard_size = 0, int64_t shard_dimension = 0,
int64_t replica_count = 2,
std::function<bool(const HloInstruction *)> should_decompose =
[](const HloInstruction *) { return true; }) {
const int64_t partition_count = 2;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(hlo_module, replica_count,
partition_count));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ReduceScatterDecomposer(nullptr,
should_decompose)
.Run(module.get()));
if (action == PassAction::kNoChange) {
ASSERT_FALSE(changed);
return;
}
ASSERT_TRUE(changed);
Literal multiplier = LiteralUtil::CreateR0<uint32_t>(shard_size);
::testing::Matcher<const ::xla::HloInstruction *> id_matcher = [&]() {
switch (mode) {
case CollectiveOpGroupMode::kCrossPartition:
return op::PartitionId();
case CollectiveOpGroupMode::kCrossReplica:
return op::ReplicaId();
case CollectiveOpGroupMode::kCrossReplicaAndPartition:
return op::ReplicaId();
case CollectiveOpGroupMode::kFlattenedID: {
return op::Add(
op::Multiply(op::ReplicaId(),
op::Constant(LiteralUtil::CreateR0<uint32_t>(
partition_count))),
op::PartitionId());
}
}
}();
auto root = module->entry_computation()->root_instruction();
const Shape &shape = root->shape();
::testing::Matcher<const ::xla::HloInstruction *> slice_index = id_matcher;
if (action == PassAction::kTableLookup) {
slice_index = op::Reshape(op::DynamicSlice(op::Constant(), id_matcher));
}
if (mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {
slice_index = op::Add(
op::Multiply(
slice_index,
op::Constant(LiteralUtil::CreateR0<uint32_t>(partition_count))),
op::PartitionId());
}
auto zero_matcher = op::Constant(LiteralUtil::Zero(U32));
std::vector<::testing::Matcher<const ::xla::HloInstruction *>> ds_operands(
shape.rank() + 1, zero_matcher);
ds_operands[0] = op::AllReduce(op::Parameter(0));
ds_operands[shard_dimension + 1] =
op::Multiply(slice_index, op::Constant(std::move(multiplier)));
EXPECT_THAT(root, op::DynamicSlice(ds_operands));
}
};
TEST_F(ReduceScatterDecomposerTest, TrivialReplicaID) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossReplica,
4);
}
TEST_F(ReduceScatterDecomposerTest, TableLookupReplicaId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{1, 0}}, dimensions={0}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTableLookup,
CollectiveOpGroupMode::kCrossReplica,
4);
}
TEST_F(ReduceScatterDecomposerTest, TrivialCrossReplicaAndPartition) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{0, 1}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossReplicaAndPartition,
2, 1);
}
TEST_F(ReduceScatterDecomposerTest,
TrivialCrossReplicaAndPartition_SingleReplica) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossPartition,
4, 1, 1);
}
TEST_F(ReduceScatterDecomposerTest, TableLookupFlattenedId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kTableLookup,
CollectiveOpGroupMode::kFlattenedID,
2, 1);
}
TEST_F(ReduceScatterDecomposerTest, NoChange) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = (f32[4, 2], f32[4,2]) reduce-scatter(p0, p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kNoChange);
}
TEST_F(ReduceScatterDecomposerTest, NoChangeWithShouldDecompose) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0,1}, {2,3}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kNoChange,
CollectiveOpGroupMode::kCrossReplica,
0, 0,
2, [](const HloInstruction *) { return false; });
}
}
} |
1,837 | cpp | tensorflow/tensorflow | batch_dot_simplification | third_party/xla/xla/service/batch_dot_simplification.cc | third_party/xla/xla/service/batch_dot_simplification_test.cc | #ifndef XLA_SERVICE_BATCH_DOT_SIMPLIFICATION_H_
#define XLA_SERVICE_BATCH_DOT_SIMPLIFICATION_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
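// Simplifies batch dots by rewriting away degenerate (size-1) batch
// dimensions, producing a lower-rank dot followed by a reshape back to the
// original shape.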
class BatchDotSimplification : public HloModulePass {
public:
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
absl::string_view name() const override { return "batch-dot-simplification"; }
private:
absl::StatusOr<bool> ElideDegenerateBatchDimensionFromBatchDot(
HloInstruction* batch_dot);
};
}
#endif
#include "xla/service/batch_dot_simplification.h"
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_creation_utils.h"
namespace xla {
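// Rewrites one batch dot whose lhs/rhs batch dimensions are an identical iota
// prefix: size-1 batch dimensions are elided from both operands, the dot
// dimension numbers are shifted accordingly, and the original shape is
// restored with a reshape. Sparse dots and dots with multiple contracting
// dimensions are left untouched.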
absl::StatusOr<bool>
BatchDotSimplification::ElideDegenerateBatchDimensionFromBatchDot(
HloInstruction* batch_dot) {
if (Cast<HloDotInstruction>(batch_dot)->sparse_operands()) {
return false;
}
const auto& is_iota = [](absl::Span<const int64_t> dims) {
for (int64_t i = 0; i < dims.size(); ++i) {
if (dims[i] != i) {
return false;
}
}
return true;
};
if (!absl::c_equal(
batch_dot->dot_dimension_numbers().lhs_batch_dimensions(),
batch_dot->dot_dimension_numbers().rhs_batch_dimensions()) ||
!is_iota(batch_dot->dot_dimension_numbers().lhs_batch_dimensions())) {
return false;
}
const DotDimensionNumbers& dim_numbers = batch_dot->dot_dimension_numbers();
HloInstruction *lhs = batch_dot->mutable_operand(0),
*rhs = batch_dot->mutable_operand(1);
const Shape& lhs_shape = lhs->shape();
if (dim_numbers.lhs_contracting_dimensions_size() != 1) {
return false;
}
std::vector<int64_t> degenerate_dims;
for (int64_t batch_dim : dim_numbers.lhs_batch_dimensions()) {
if (lhs_shape.dimensions(batch_dim) == 1) {
degenerate_dims.push_back(batch_dim);
}
}
if (degenerate_dims.empty()) {
return false;
}
TF_ASSIGN_OR_RETURN(HloInstruction * new_lhs,
ElideDegenerateDims(lhs, degenerate_dims));
TF_ASSIGN_OR_RETURN(HloInstruction * new_rhs,
ElideDegenerateDims(rhs, degenerate_dims));
DotDimensionNumbers new_dim_numbers = dim_numbers;
new_dim_numbers.clear_lhs_batch_dimensions();
new_dim_numbers.clear_rhs_batch_dimensions();
for (int64_t i = 0, e = dim_numbers.lhs_batch_dimensions_size() -
degenerate_dims.size();
i < e; i++) {
new_dim_numbers.add_lhs_batch_dimensions(i);
new_dim_numbers.add_rhs_batch_dimensions(i);
}
new_dim_numbers.set_lhs_contracting_dimensions(
0,
new_dim_numbers.lhs_contracting_dimensions(0) - degenerate_dims.size());
new_dim_numbers.set_rhs_contracting_dimensions(
0,
new_dim_numbers.rhs_contracting_dimensions(0) - degenerate_dims.size());
TF_ASSIGN_OR_RETURN(
HloInstruction * new_dot,
MakeDotHlo(new_lhs, new_rhs, new_dim_numbers,
batch_dot->precision_config(),
batch_dot->shape().element_type()));
TF_ASSIGN_OR_RETURN(HloInstruction * new_dot_reshaped,
MakeReshapeHlo(batch_dot->shape(), new_dot));
VLOG(2) << "Replaced " << batch_dot->ToString() << " with "
<< new_dot->ToString();
TF_RETURN_IF_ERROR(
batch_dot->parent()->ReplaceInstruction(batch_dot, new_dot_reshaped));
return true;
}
absl::StatusOr<bool> BatchDotSimplification::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloInstruction*> dot_instrs;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(computation->instructions(), std::back_inserter(dot_instrs),
[](HloInstruction* instr) {
return instr->opcode() == HloOpcode::kDot;
});
}
for (HloInstruction* dot_instr : dot_instrs) {
TF_ASSIGN_OR_RETURN(bool elided_batch_dim_from_one,
ElideDegenerateBatchDimensionFromBatchDot(dot_instr));
changed |= elided_batch_dim_from_one;
}
return changed;
}
} | #include "xla/service/batch_dot_simplification.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class BatchDotSimplificationTest : public HloTestBase {};
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_VectorVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,3] parameter(0)
b = f32[1,3] parameter(1)
ROOT dot = f32[1] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
0, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_MatrixVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,9,3] parameter(0)
b = f32[1,3] parameter(1)
ROOT dot = f32[1,9] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
1, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_MatrixMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,9,3] parameter(0)
b = f32[1,3,7] parameter(1)
ROOT dot = f32[1,9,7] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
1, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_VectorVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,3] parameter(0)
b = f32[9,1,7,1,3] parameter(1)
ROOT dot = f32[9,1,7,1] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
2, 2)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_VectorMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,3] parameter(0)
b = f32[9,1,7,1,20,3] parameter(1)
ROOT dot = f32[9,1,7,1,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={5}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
2, 3)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_MatrixMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,19,3] parameter(0)
b = f32[9,1,7,1,3,20] parameter(1)
ROOT dot = f32[9,1,7,1,19,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={5}, rhs_contracting_dims={4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
3, 2)));
}
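// A dot with degenerate batch dimensions but no contracting dimensions should
// be left untouched by the pass.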
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDimsNonContracting) {
const char* hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,101] parameter(0)
b = f32[1,101] parameter(1)
ROOT dot = f32[1,101,101] dot(a,b), lhs_batch_dims={0},
lhs_contracting_dims={},
rhs_batch_dims={0},
rhs_contracting_dims={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_FALSE(pass.Run(m.get()).value());
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDimsMultipleContracting) {
const char* hlo_text = R"(
HloModule BatchDot
main {
lhs = f32[1,5,17,10,13] parameter(0)
rhs = f32[1,9,10,13,6,5] parameter(1)
ROOT dot = f32[10,1,17,9,6] dot(lhs,rhs), lhs_batch_dims={3,0},
rhs_batch_dims={2,0},
lhs_contracting_dims={1,4},
rhs_contracting_dims={5,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_FALSE(pass.Run(m.get()).value());
}
}
} |
1,838 | cpp | tensorflow/tensorflow | while_loop_simplifier | third_party/xla/xla/service/while_loop_simplifier.cc | third_party/xla/xla/service/while_loop_simplifier_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_SIMPLIFIER_H_
#define XLA_SERVICE_WHILE_LOOP_SIMPLIFIER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
absl::StatusOr<bool> TryRemoveDeadWhileParams(HloInstruction* while_op);
class WhileLoopSimplifier : public HloModulePass {
public:
explicit WhileLoopSimplifier(bool simplify_compare_instrs = false)
: simplify_compare_instrs_(simplify_compare_instrs) {}
~WhileLoopSimplifier() override = default;
absl::string_view name() const override { return "simplify-while-loops"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const bool simplify_compare_instrs_;
};
}
#endif
#include "xla/service/while_loop_simplifier.h"
#include <cstdint>
#include <optional>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/union_find.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace m = match;
using hlo_query::ContainsInstrWithOpcode;
using std::optional;
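// When the loop's induction variable has a constant start value and a known
// trip count, compares of that variable against constants outside its range
// have the same outcome on every iteration and are folded to true/false.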
static absl::StatusOr<bool> TryRemoveTrivialCompare(HloInstruction* while_op) {
std::optional<int64_t> indvar_index = GetLoopInductionVarTupleIdx(while_op);
if (indvar_index.has_value()) {
if (while_op->operand(0)->operand(*indvar_index)->IsConstant()) {
const HloConstantInstruction* init_value_hlo =
Cast<HloConstantInstruction>(
while_op->operand(0)->operand(*indvar_index));
std::optional<int64_t> trip_count = MatchTrivialLoopTripCount(
while_op, indvar_index.value(), init_value_hlo->literal());
if (trip_count.has_value()) {
std::optional<int64_t> init_value =
LiteralUtil::LiteralAsScalarInt64(init_value_hlo->literal());
for (HloInstruction* body_instr :
while_op->while_body()->instructions()) {
HloInstruction* constant;
if (Match(body_instr,
m::Compare(m::GetTupleElement(m::Parameter(),
indvar_index.value()),
m::Constant(&constant).IsConstantScalar()))) {
std::optional<int64_t> constant_value =
LiteralUtil::LiteralAsScalarInt64(constant->literal());
if (constant_value.has_value()) {
if (constant_value.value() <= init_value.value()) {
if (body_instr->comparison_direction() ==
ComparisonDirection::kLt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, false)));
return true;
} else if (body_instr->comparison_direction() ==
ComparisonDirection::kGt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, true)));
return true;
}
}
if (constant_value.value() >=
init_value.value() + trip_count.value()) {
if (body_instr->comparison_direction() ==
ComparisonDirection::kLt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, true)));
return true;
} else if (body_instr->comparison_direction() ==
ComparisonDirection::kGt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, false)));
return true;
}
}
}
}
}
}
}
}
return false;
}
void CopyFrontendAttributes(HloInstruction* old_while_op,
HloInstruction* new_while_op) {
new_while_op->add_frontend_attributes(old_while_op->frontend_attributes());
}
void CopyMetadata(HloInstruction* old_while_op, HloInstruction* new_while_op) {
new_while_op->set_metadata(old_while_op->metadata());
}
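// Rebuilds `while_op` keeping only the tuple elements listed in
// `used_tuple_indices`. Users of the old while op get a tuple of the original
// shape in which removed elements are read from the while init, or from the
// new loop's element `index_for_replaced` when one is supplied.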
static absl::StatusOr<HloInstruction*> RemoveDeadTupleIndices(
HloInstruction* while_op, absl::flat_hash_set<int64_t>& used_tuple_indices,
int64_t index_for_replaced = -1) {
std::vector<int64_t> new_to_old_tuple_idx(used_tuple_indices.begin(),
used_tuple_indices.end());
absl::c_sort(new_to_old_tuple_idx);
HloModule* module = while_op->GetModule();
HloComputation* computation = while_op->parent();
HloInstruction* while_init = while_op->mutable_operand(0);
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_body_root = while_body->root_instruction();
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
absl::flat_hash_map<int64_t, int64_t> old_to_new_tuple_idx;
for (int64_t new_idx = 0; new_idx < new_to_old_tuple_idx.size(); ++new_idx) {
int64_t old_idx = new_to_old_tuple_idx[new_idx];
old_to_new_tuple_idx[old_idx] = new_idx;
VLOG(2) << "Remapping tuple index " << old_idx << " to " << new_idx;
}
std::vector<const Shape*> new_while_tuple_elem_shapes;
new_while_tuple_elem_shapes.reserve(new_to_old_tuple_idx.size());
for (int64_t old_idx : new_to_old_tuple_idx) {
new_while_tuple_elem_shapes.push_back(
&while_init->shape().tuple_shapes(old_idx));
}
Shape new_while_shape =
ShapeUtil::MakeTupleShapeWithPtrs(new_while_tuple_elem_shapes);
auto make_while_computation_replacements = [&](const HloComputation* comp) {
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
auto* param = comp->parameter_instruction(0);
replacements.emplace(param, HloInstruction::CreateParameter(
0, new_while_shape, param->name()));
std::vector<HloInstruction*> materialized_users(param->users().begin(),
param->users().end());
for (const auto* user : materialized_users) {
if (user == while_body_root) {
continue;
}
CHECK_EQ(user->opcode(), HloOpcode::kGetTupleElement)
<< user->ToString(print_no_metadata);
int64_t old_idx = user->tuple_index();
auto new_idx_iter = old_to_new_tuple_idx.find(old_idx);
if (new_idx_iter != old_to_new_tuple_idx.end()) {
replacements.emplace(
user, HloInstruction::CreateGetTupleElement(user->shape(), param,
new_idx_iter->second));
} else {
replacements.emplace(user, nullptr);
}
}
for (const auto* hlo : comp->MakeInstructionPostOrder()) {
if (hlo == comp->root_instruction() || replacements.contains(hlo)) {
continue;
}
for (const auto* operand : hlo->operands()) {
auto op_it = replacements.find(operand);
if (op_it != replacements.end() && op_it->second == nullptr) {
replacements[hlo] = nullptr;
break;
}
}
}
return replacements;
};
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
while_cond_replacements = make_while_computation_replacements(while_cond);
std::unique_ptr<HloComputation> new_while_cond =
while_cond->CloneWithReplacements(&while_cond_replacements);
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
while_body_replacements = make_while_computation_replacements(while_body);
std::vector<HloInstruction*> new_while_body_root_elems;
new_while_body_root_elems.reserve(new_to_old_tuple_idx.size());
for (int64_t old_idx : new_to_old_tuple_idx) {
new_while_body_root_elems.push_back(
while_body_root->mutable_operand(old_idx));
}
while_body_replacements.emplace(
while_body_root, HloInstruction::CreateTuple(new_while_body_root_elems));
std::unique_ptr<HloComputation> new_while_body =
while_body->CloneWithReplacements(&while_body_replacements);
std::vector<HloInstruction*> new_while_init_elems;
new_while_init_elems.reserve(new_to_old_tuple_idx.size());
for (int64_t old_idx : new_to_old_tuple_idx) {
new_while_init_elems.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
while_init->shape().tuple_shapes(old_idx), while_init, old_idx)));
}
auto* new_while_init = computation->AddInstruction(
HloInstruction::CreateTuple(new_while_init_elems));
auto* new_while_op = computation->AddInstruction(HloInstruction::CreateWhile(
new_while_shape,
module->AddEmbeddedComputation(std::move(new_while_cond)),
module->AddEmbeddedComputation(std::move(new_while_body)),
new_while_init));
new_while_op->CopyBackendConfigFrom(while_op);
CopyFrontendAttributes(while_op, new_while_op);
CopyMetadata(while_op, new_while_op);
std::vector<HloInstruction*> new_tuple_elems;
const int64_t tuple_size = ShapeUtil::TupleElementCount(while_init->shape());
for (int64_t old_idx = 0; old_idx < tuple_size; ++old_idx) {
auto new_tuple_idx_it = old_to_new_tuple_idx.find(old_idx);
if (new_tuple_idx_it != old_to_new_tuple_idx.end() ||
index_for_replaced != -1) {
int64_t gte_idx = new_tuple_idx_it != old_to_new_tuple_idx.end()
? new_tuple_idx_it->second
: index_for_replaced;
new_tuple_elems.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_while_op->shape().tuple_shapes(gte_idx), new_while_op,
gte_idx)));
} else {
new_tuple_elems.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
while_init->shape().tuple_shapes(old_idx), while_init, old_idx)));
}
}
HloInstruction* new_tuple =
computation->AddInstruction(HloInstruction::CreateTuple(new_tuple_elems));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(while_op, new_tuple));
return new_while_op;
}
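// Drops loop-carried tuple elements that cannot affect the loop's result:
// elements unused after the loop that feed neither the condition, any
// side-effecting op, nor any surviving element, as well as groups of elements
// the body merely passes through unchanged.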
absl::StatusOr<bool> TryRemoveDeadWhileParams(HloInstruction* while_op) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
if (!while_op->parent()->IsSafelyRemovable(while_op)) {
VLOG(2) << "Can't remove dead parameters from non-removable while op.";
return false;
}
HloInstruction* while_init = while_op->mutable_operand(0);
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_body_root = while_body->root_instruction();
if (!while_init->shape().IsTuple()) {
VLOG(2) << "While op's carried value isn't tuple shaped.";
return false;
}
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While body's root is not a tuple(...) instruction.";
return false;
}
const int64_t tuple_size = ShapeUtil::TupleElementCount(while_init->shape());
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
absl::flat_hash_set<int64_t> used_tuple_indices;
for (int64_t i = 0; i < tuple_size; ++i) {
used_tuple_indices.insert(i);
}
for (const HloInstruction* instr : {while_body->parameter_instruction(0),
while_cond->parameter_instruction(0)}) {
for (const HloInstruction* user : instr->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(2) << "Cowardly refusing to analyze while loop with "
<< instr->ToString(print_no_metadata)
<< " used by non-GTE instruction "
<< user->ToString(print_no_metadata) << " in computation "
<< instr->parent()->name();
return false;
}
}
}
if (tuple_size == 0) {
VLOG(2) << "Can't remove elements from while loop's tuple -- it's already "
"empty.";
return false;
}
absl::flat_hash_set<int64_t> used_indices_after_loop;
if (while_op == while_op->parent()->root_instruction()) {
for (int64_t i = 0; i < while_body_root->operand_count(); ++i) {
used_indices_after_loop.insert(i);
}
}
for (auto user : while_op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
for (int64_t i = 0; i < while_body_root->operand_count(); ++i) {
used_indices_after_loop.insert(i);
}
break;
}
used_indices_after_loop.insert(user->tuple_index());
}
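// Tracks which input tuple indices an instruction transitively depends on.
// Sets are shared by pointer where possible; an owned hash set is allocated
// lazily, when indices are added directly or a merge can no longer just
// collect pointers in the inlined storage.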
struct InputIndicesSet {
void Merge(const InputIndicesSet& other) {
if (all.size() + other.all.size() <= all.capacity() && owned == nullptr) {
absl::c_copy(other.all, std::back_inserter(all));
return;
}
if (owned == nullptr) {
owned = std::make_unique<absl::flat_hash_set<int64_t>>();
owned->reserve(other.all.front()->size() * 2);
}
for (auto* deps : all) {
if (deps == owned.get()) {
continue;
}
owned->insert(deps->begin(), deps->end());
}
for (auto* deps : other.all) {
owned->insert(deps->begin(), deps->end());
}
all.clear();
all.push_back(owned.get());
}
void Add(int64_t index) {
if (owned == nullptr) {
CHECK(all.empty());
owned = std::make_unique<absl::flat_hash_set<int64_t>>();
all.push_back(owned.get());
}
owned->insert(index);
}
std::unique_ptr<absl::flat_hash_set<int64_t>> owned;
absl::InlinedVector<const absl::flat_hash_set<int64_t>*, 4> all;
};
absl::flat_hash_map<HloInstruction*, InputIndicesSet> inst_input_deps;
absl::flat_hash_map<HloInstruction*, tensorflow::UnionFind<HloInstruction*>>
disjoint_sets;
for (HloComputation* comp : {while_body, while_cond}) {
HloInstruction* while_input = comp->parameter_instruction(0);
for (HloInstruction* inst : comp->instructions()) {
if (inst == while_input || inst == while_body_root) {
continue;
}
disjoint_sets[inst].Get() = inst;
}
}
absl::flat_hash_set<int64_t> side_effecting_indices;
for (HloComputation* comp : {while_body, while_cond}) {
HloInstruction* while_input = comp->parameter_instruction(0);
for (HloInstruction* inst : comp->MakeInstructionPostOrder()) {
if (inst == while_input || inst == while_body_root) {
continue;
}
auto& deps = inst_input_deps[inst];
auto& my_set = disjoint_sets[inst];
if (inst->opcode() == HloOpcode::kGetTupleElement &&
inst->operand(0) == while_input) {
deps.Add(inst->tuple_index());
HloInstruction* output =
while_body_root->mutable_operand(inst->tuple_index());
if (output != inst) {
disjoint_sets[output].Merge(&my_set);
}
} else {
for (HloInstruction* operand : inst->operands()) {
disjoint_sets[operand].Merge(&my_set);
deps.Merge(inst_input_deps[operand]);
}
}
if (inst->HasSideEffect() || inst == while_cond->root_instruction()) {
for (auto* dep : deps.all) {
side_effecting_indices.insert(dep->begin(), dep->end());
}
}
}
}
absl::flat_hash_set<int64_t> indices_affecting_others;
for (int64_t i = 0; i < tuple_size; ++i) {
HloInstruction* output = while_body_root->mutable_operand(i);
for (auto* deps : inst_input_deps[output].all) {
for (int64_t index : *deps) {
if (index != i) {
indices_affecting_others.insert(index);
}
}
}
}
for (int64_t i = 0; i < tuple_size; ++i) {
if (!indices_affecting_others.contains(i) &&
!used_indices_after_loop.contains(i) &&
!side_effecting_indices.contains(i)) {
VLOG(2) << "Remove with dependencies " << i;
used_tuple_indices.erase(i);
}
}
absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<int64_t>> groups;
for (int64_t i = 0; i < tuple_size; ++i) {
HloInstruction* output = while_body_root->mutable_operand(i);
groups[disjoint_sets[output].Get()].insert(i);
}
for (HloComputation* comp : {while_body, while_cond}) {
HloInstruction* while_input = comp->parameter_instruction(0);
for (HloInstruction* gte : while_input->users()) {
groups[disjoint_sets[gte].Get()].insert(gte->tuple_index());
}
}
for (const auto& group : groups) {
if (absl::c_any_of(group.second, [&](int64_t index) {
const HloInstruction* output = while_body_root->operand(index);
return side_effecting_indices.contains(index) ||
(used_indices_after_loop.contains(index) &&
!(output->opcode() == HloOpcode::kGetTupleElement &&
output->operand(0) ==
while_body->parameter_instruction(0) &&
output->tuple_index() == index));
})) {
continue;
}
VLOG(2) << "Remove with groups:";
for (int64_t index : group.second) {
VLOG(2) << " index " << index;
used_tuple_indices.erase(index);
}
}
if (used_tuple_indices.size() == tuple_size) {
VLOG(2) << "Loop " << while_op->ToString(print_no_metadata)
<< " uses all of its inputs; no simplification possible.";
return false;
}
CHECK_LT(used_tuple_indices.size(), tuple_size);
VLOG(1) << "Eliminating " << tuple_size - used_tuple_indices.size()
<< " elements from tuple of "
<< while_op->ToString(print_no_metadata);
TF_ASSIGN_OR_RETURN(while_op,
RemoveDeadTupleIndices(while_op, used_tuple_indices));
return true;
}
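// Redirects every get-tuple-element whose index is in `duplicates` to read
// element `tuple_index` instead, in both the body and the condition, then
// removes the now-unused duplicate indices via RemoveDeadTupleIndices.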
static absl::StatusOr<HloInstruction*> TryRemoveRepeatedWhileTupleIndicesHelper(
HloInstruction* while_op, const int64_t tuple_index, bool replace_with_init,
absl::flat_hash_set<int64_t>& duplicates) {
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_init = while_op->mutable_operand(0);
VLOG(2) << "while_init " << while_init->ToString() << " operands "
<< while_init->operand_count();
VLOG(2) << "while_body_root " << while_body->root_instruction()->ToString()
<< " operands " << while_body->root_instruction()->operand_count();
for (HloComputation* comp : {while_body, while_cond}) {
auto new_get = comp->AddInstruction(HloInstruction::CreateGetTupleElement(
comp->parameter_instruction(0)->shape().tuple_shapes(tuple_index),
comp->parameter_instruction(0), tuple_index));
std::vector<HloInstruction*> instrs_to_replace;
for (auto* instr : comp->instructions()) {
if (instr->opcode() == HloOpcode::kGetTupleElement &&
duplicates.contains(instr->tuple_index()) &&
instr->operand(0) == comp->parameter_instruction(0)) {
instrs_to_replace.push_back(instr);
}
}
for (auto instr : instrs_to_replace) {
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_get));
}
}
absl::flat_hash_set<int64_t> used_tuple_indices;
for (int index = 0; index < while_init->shape().tuple_shapes_size();
++index) {
if (!duplicates.count(index)) {
used_tuple_indices.insert(index);
}
}
TF_ASSIGN_OR_RETURN(
while_op, RemoveDeadTupleIndices(while_op, used_tuple_indices,
replace_with_init ? -1 : tuple_index));
return while_op;
}
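// True if `instr` is a dynamic-update-slice whose destination is read directly
// from the while body's parameter tuple.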
static bool IsDynamicUpdateSliceWhileInsertion(
const HloInstruction* instr, const HloComputation* while_body) {
return instr->opcode() == HloOpcode::kDynamicUpdateSlice &&
instr->operand(0)->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0)->operand(0) == while_body->parameter_instruction(0);
}
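// Scans the carried tuple for elements that duplicate an earlier element
// (plain pass-throughs of the same parameter index, or matching
// dynamic-update-slice insertions) so the duplicates can be merged onto a
// single index.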
static absl::StatusOr<bool> TryRemoveRepeatedWhileTupleIndices(
HloInstruction* while_op) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
int index_to_investigate = 0;
if (!while_op->parent()->IsSafelyRemovable(while_op)) {
VLOG(2) << "Can't remove dead parameters from non-removable while op.";
return false;
}
HloInstruction* while_init = while_op->mutable_operand(0);
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_body_root = while_body->root_instruction();
if (!while_init->shape().IsTuple()) {
VLOG(2) << "While op's carried value isn't tuple shaped.";
return false;
}
bool changed = false;
while (index_to_investigate < while_init->shape().tuple_shapes_size()) {
if (!while_init->shape().IsTuple() ||
while_init->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While op's carried value isn't tuple shaped.";
return false;
}
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While body's root is not a tuple(...) instruction.";
return false;
}
auto& while_shape = while_init->shape();
VLOG(2) << "Iterating " << index_to_investigate;
absl::flat_hash_set<int64_t> duplicates;
auto* pivot_init_elem = while_init->operand(index_to_investigate);
auto* pivot_body_elem = while_body_root->operand(index_to_investigate);
bool replace_with_init = true;
if (pivot_body_elem->opcode() == HloOpcode::kGetTupleElement &&
pivot_body_elem->operand(0) == while_body->parameter_instruction(0)) {
if (pivot_body_elem->tuple_index() != index_to_investigate) {
VLOG(2) << "Mismatch between pivot_body_elem->tuple_index() "
<< pivot_body_elem->tuple_index() << " index_to_investigate "
<< index_to_investigate;
index_to_investigate++;
continue;
}
} else if (IsDynamicUpdateSliceWhileInsertion(pivot_body_elem,
while_body)) {
if (pivot_body_elem->operand(0)->tuple_index() != index_to_investigate) {
VLOG(2)
<< "Mismatch between pivot_body_elem->operand(0)->tuple_index() "
<< pivot_body_elem->operand(0)->tuple_index()
<< " index_to_investigate " << index_to_investigate;
index_to_investigate++;
continue;
}
} else {
index_to_investigate++;
continue;
}
for (int64_t i = index_to_investigate + 1;
i < while_shape.tuple_shapes_size(); ++i) {
auto* init_elem = while_init->operand(i);
auto* body_elem = while_body_root->operand(i);
if (pivot_body_elem->opcode() == HloOpcode::kGetTupleElement &&
body_elem->opcode() == HloOpcode::kGetTupleElement &&
body_elem->operand(0) == while_body->parameter_instruction(0)) {
if (body_elem->tuple_index() != i) {
VLOG(2) << "Mismatch between body_elem->tuple_index() "
<< body_elem->tuple_index() << " i " << i;
continue;
}
} else if (IsDynamicUpdateSliceWhileInsertion(pivot_body_elem,
while_body) &&
IsDynamicUpdateSliceWhileInsertion(body_elem, while_body)) {
if (pivot_body_elem-> | #include "xla/service/while_loop_simplifier.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
using ::testing::_;
namespace op = xla::testing::opcode_matchers;
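// Returns the first kWhile instruction found in the module's entry
// computation.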
HloInstruction* FindFirstWhile(HloModule* m) {
const auto& instrs = m->entry_computation()->instructions();
return *absl::c_find_if(instrs, HloPredicateIsOp<HloOpcode::kWhile>);
}
class WhileLoopSimplifierTest : public HloTestBase {
protected:
[[nodiscard]] std::unique_ptr<VerifiedHloModule> MakeModuleWithSimpleLoop(
int num_iters);
[[nodiscard]] std::unique_ptr<VerifiedHloModule>
MakeModuleWithSimpleLoopTupleElementLoopBound(int num_iters);
};
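// Builds a module with a counting loop: the induction variable starts at 42
// and the condition compares it against 42 + num_iters with LT, so the loop
// runs for exactly `num_iters` iterations.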
std::unique_ptr<VerifiedHloModule>
WhileLoopSimplifierTest::MakeModuleWithSimpleLoop(int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(42 + num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
std::unique_ptr<VerifiedHloModule>
WhileLoopSimplifierTest::MakeModuleWithSimpleLoopTupleElementLoopBound(
int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoopWithIndirectLoopBound
SimpleLoopWithIndirectLoopBound.body {
loop_var.1 = (s32[], s32[3]{0}, s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
limit = s32[] get-tuple-element(loop_var.1), index=2
ROOT tuple = (s32[], s32[3]{0}, s32[]) tuple(add, multiply, limit)
}
SimpleLoopWithIndirectLoopBound.condition {
loop_var.2 = (s32[], s32[3]{0}, s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=2
ROOT less-than = pred[] compare(get-tuple-element.3, get-tuple-element.4), direction=LT
}
ENTRY SimpleLoopWithIndirectLoopBound {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
constant.2 = s32[] constant({{LOOP_BOUND}})
tuple.1 = (s32[], s32[3]{0}, s32[]) tuple(constant.3, constant.4,
constant.2)
ROOT while = (s32[], s32[3]{0}, s32[]) while(tuple.1),
condition=SimpleLoopWithIndirectLoopBound.condition,
body=SimpleLoopWithIndirectLoopBound.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(42 + num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
TEST_F(WhileLoopSimplifierTest, LoopWithZeroIterationSimplified) {
auto m = MakeModuleWithSimpleLoop(0);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Constant(), op::Constant()));
}
TEST_F(WhileLoopSimplifierTest,
LoopWithZeroIterationTupleElementLoopBoundSimplified) {
auto m = MakeModuleWithSimpleLoopTupleElementLoopBound(0);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Constant(), op::Constant(), op::Constant()));
}
TEST_F(WhileLoopSimplifierTest, LoopWithOneIterationSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Add(), op::Multiply()));
}
TEST_F(WhileLoopSimplifierTest,
LoopWithOneIterationTupleELementLoopBoundSimplified) {
auto m = MakeModuleWithSimpleLoopTupleElementLoopBound(1);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Add(), op::Multiply(), op::Constant()));
}
TEST_F(WhileLoopSimplifierTest, LoopWithTwoIterationsNotSimplified) {
auto m = MakeModuleWithSimpleLoop(2);
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithControlDependencySimplifiedDependencyPreserved) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* true_op = while_op->while_body()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
TF_ASSERT_OK(true_op->AddControlDependencyTo(
while_op->while_body()->root_instruction()));
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(computation->root_instruction()->control_predecessors(),
ElementsAre(op::Constant()))
<< computation->ToString();
}
TEST_F(WhileLoopSimplifierTest, LoopWithSendNotSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
auto* send = while_body->AddInstruction(HloInstruction::CreateSend(
while_body->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
token,
0));
while_body->AddInstruction(HloInstruction::CreateSendDone(send));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithRecvNotSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
auto* recv = while_body->AddInstruction(
HloInstruction::CreateRecv(ShapeUtil::MakeShape(F32, {1}), token,
0));
while_body->AddInstruction(HloInstruction::CreateRecvDone(recv));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithInfeedSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto token = while_body->AddInstruction(HloInstruction::CreateToken());
while_body->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithInfeedInCondNotSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_cond = while_op->while_condition();
auto token = while_cond->AddInstruction(HloInstruction::CreateToken());
while_cond->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, NonTupleShapedLoopNotSimplified) {
const std::string hlo_string = R"(
HloModule NonTupleShapedLoop
NonTupleShapedLoop.body {
loop_var.1 = s32[] parameter(0)
constant.1 = s32[] constant(-1)
ROOT add = s32[] add(s32[] loop_var.1, s32[] constant.1)
}
NonTupleShapedLoop.condition {
loop_var = s32[] parameter(0)
constant = s32[] constant(100)
ROOT less-than = pred[] compare(s32[] loop_var, s32[] constant), direction=LT
}
ENTRY INonTupleShapedLoop {
constant.2 = s32[] constant(42)
ROOT while = s32[] while(s32[] constant.2),
condition=NonTupleShapedLoop.condition,
body=NonTupleShapedLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopSwappingTupleElementsNotSimplified) {
const std::string hlo_string = R"(
HloModule SwappingTupleElements
SwappingTupleElements.body {
loop_var = (s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[]) loop_var),index=1
get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[]) loop_var),
index=0
ROOT tuple = (s32[], s32[]) tuple(s32[] get-tuple-element,
s32[] get-tuple-element.1)
}
SwappingTupleElements.always_true {
param = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY SwappingTupleElements {
x = s32[] parameter(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[]) tuple(s32[] x, s32[] y)
ROOT while = (s32[], s32[]) while((s32[], s32[]) tuple.1),
condition=SwappingTupleElements.always_true,
body=SwappingTupleElements.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithUnusedButModifiedTupleElementNotSimplified) {
const std::string hlo_string = R"(
HloModule UnusedButModifiedTupleElement
UnusedButModifiedTupleElement.body {
loop_var = (s32[]) parameter(0)
constant.1 = s32[] constant(1)
ROOT tuple = (s32[]) tuple(s32[] constant.1)
}
UnusedButModifiedTupleElement.always_true {
param = (s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY UnusedButModifiedTupleElement {
constant.2 = s32[] constant(0)
tuple.1 = (s32[]) tuple(s32[] constant.2)
ROOT while = (s32[]) while((s32[]) tuple.1),
condition=UnusedButModifiedTupleElement.always_true,
body=UnusedButModifiedTupleElement.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithUnusedOutsideLoopButModifiedTupleElementSimplified) {
const std::string hlo_string = R"(
HloModule UnusedButModifiedTupleElement
UnusedButModifiedTupleElement.body {
loop_var = (s32[], s32[]) parameter(0)
constant.1 = s32[] constant(1)
ROOT tuple = (s32[], s32[]) tuple(s32[] constant.1, constant.1)
}
UnusedButModifiedTupleElement.cond {
param = (s32[], s32[]) parameter(0)
gte.cond = s32[] get-tuple-element(param), index=0
constant.3 = s32[] constant(1)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY UnusedButModifiedTupleElement {
constant.2 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.2)
while = (s32[], s32[]) while(tuple.1),
condition=UnusedButModifiedTupleElement.cond,
body=UnusedButModifiedTupleElement.body
ROOT gte = s32[] get-tuple-element(while), index=0
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
auto m_while = AllOf(op::While(), op::Shape("(s32[])"));
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::GetTupleElement(m_while));
}
TEST_F(WhileLoopSimplifierTest, LoopWithEmptyTupleNotSimplified) {
const std::string hlo_string = R"(
HloModule EmptyTuple
EmptyTuple.body {
loop_var = () parameter(0)
ROOT tuple = () tuple()
}
EmptyTuple.always_true {
param = () parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY EmptyTuple {
tuple.1 = () tuple()
ROOT while = () while(() tuple.1), condition=EmptyTuple.always_true,
body=EmptyTuple.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithElemUsedTwiceNotSimplified) {
const std::string hlo_string = R"(
HloModule ElemUsedTwice
ElemUsedTwice.body {
param0 = (s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[]) param0), index=0
ROOT tuple = (s32[], s32[]) tuple(s32[] get-tuple-element,
s32[] get-tuple-element)
}
ElemUsedTwice.always_true {
param = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY ElemUsedTwice {
x = s32[] parameter(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[]) tuple(s32[] x, s32[] y)
ROOT while = (s32[], s32[]) while((s32[], s32[]) tuple.1),
condition=ElemUsedTwice.always_true, body=ElemUsedTwice.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, RemoveUnusedLoopOperands) {
const std::string hlo_string = R"(
HloModule RemoveUnusedOperands
RemoveUnusedOperands.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=0
get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=1
constant.1 = s32[] constant(1)
add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[])
loop_var), index=2
ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1,
s32[] add, s32[] get-tuple-element.3)
}
RemoveUnusedOperands.loop_condition {
constant.2 = s32[] constant(0)
param0 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0),
index=2
ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ
}
ENTRY RemoveUnusedOperands {
x = s32[] parameter(0)
constant.3 = s32[] constant(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3,
s32[] y)
ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1),
condition=RemoveUnusedOperands.loop_condition,
body=RemoveUnusedOperands.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
const auto& instrs = m->entry_computation()->instructions();
HloInstruction* new_while_op =
*absl::c_find_if(instrs, [&](const HloInstruction* instr) {
return (instr->opcode() == HloOpcode::kWhile &&
instr->name() != "while");
});
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
EXPECT_TRUE(
ShapeUtil::Equal(new_while_op->shape(),
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32})))
<< ShapeUtil::HumanString(new_while_op->shape());
EXPECT_THAT(
new_while_op->while_body()->root_instruction(),
op::Tuple(
op::Add(op::GetTupleElement(op::Parameter(0), 0),
op::Constant()),
op::GetTupleElement(op::Parameter(0), 1)));
EXPECT_THAT(new_while_op->while_condition()->root_instruction(),
op::Eq(op::Constant(),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(WhileLoopSimplifierTest, RemoveUnusedLoopOperandsCheckMetadata) {
const std::string hlo_string = R"(
HloModule RemoveUnusedOperands
RemoveUnusedOperands.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=0
get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=1
constant.1 = s32[] constant(1)
add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[])
loop_var), index=2
ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1,
s32[] add, s32[] get-tuple-element.3)
}
RemoveUnusedOperands.loop_condition {
constant.2 = s32[] constant(0)
param0 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0),
index=2
ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ
}
ENTRY RemoveUnusedOperands {
x = s32[] parameter(0)
constant.3 = s32[] constant(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3,
s32[] y)
ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1),
condition=RemoveUnusedOperands.loop_condition,
body=RemoveUnusedOperands.body, metadata={op_name="while"}
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
OpMetadata while_metadata;
while_metadata.set_op_name("while");
EXPECT_THAT(m->entry_computation()->root_instruction(),
AllOf(op::Tuple(), op::Metadata(while_metadata)));
EXPECT_THAT(m->entry_computation()->GetInstructionWithName("while.1"),
AllOf(op::While(), op::Metadata(while_metadata)));
}
TEST_F(WhileLoopSimplifierTest,
RemoveUnusedLoopOperandsDespiteSideEffectingOps) {
const std::string hlo_string = R"(
HloModule RemoveUnusedOperands
body {
loop_var = (s32[]) parameter(0)
gte0 = s32[] get-tuple-element(loop_var), index=0
token0 = token[] after-all()
unused = ((s32[], pred[]), token[]) infeed(token0)
ROOT tuple = (s32[]) tuple(gte0)
}
cond {
loop_var = (s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY RemoveUnusedOperands {
x = s32[] parameter(0)
tuple.1 = (s32[]) tuple(s32[] x)
ROOT while = (s32[]) while((s32[]) tuple.1),
condition=cond, body=body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
const auto& instrs = m->entry_computation()->instructions();
HloInstruction* new_while_op =
*absl::c_find_if(instrs, [&](const HloInstruction* instr) {
return (instr->opcode() == HloOpcode::kWhile &&
instr->name() != "while");
});
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(new_while_op->shape()))
<< new_while_op->shape().ToString();
}
TEST_F(WhileLoopSimplifierTest, LoopWithNonTupleBodyShapeNotSimplified) {
const std::string hlo_string = R"(
HloModule BodyHasNonTupleRoot
BodyHasNonTupleRoot.passthrough {
ROOT param = (s32[], s32[]) parameter(0)
}
BodyHasNonTupleRoot.always_true {
param.1 = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY BodyHasNonTupleRoot {
init_value = (s32[], s32[]) parameter(0)
ROOT while = (s32[], s32[]) while((s32[], s32[]) init_value),
condition=BodyHasNonTupleRoot.always_true,
body=BodyHasNonTupleRoot.passthrough
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithNonTupleBodyRootInstructionNotSimplified) {
const std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT custom-call = (s32[], s32[3]{0}) custom-call(add, multiply),
custom_call_target="x"
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(44)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithArrayConstantNotSimplified) {
const std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}, s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
add.2 = s32[3]{0} add(get-tuple-element.2, get-tuple-element.3)
ROOT tuple = (s32[], s32[3]{0}, s32[3]{0}) tuple(add, add.2, get-tuple-element.3)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}, s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(47)
ROOT less-than = pred[] compare(get-tuple-element.4, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}, s32[3]{0}) tuple(constant.3, constant.4, constant.4)
ROOT while = (s32[], s32[3]{0}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, FlattenNestedTuple) {
const std::string hlo_string = R"(
HloModule Test
Body {
param = ((s32[1]), (s32[2], s32[3], (s32[4]))) parameter(0)
ta = (s32[1]) get-tuple-element(param), index=0
a = s32[1] get-tuple-element(ta), index=0
a.1 = s32[1] add(a, a)
tbcd = (s32[2], s32[3], (s32[4])) get-tuple-element(param), index=1
ROOT tuple = ((s32[1]), (s32[2], s32[3], (s32[4]))) tuple(ta, tbcd)
}
Cond {
param = ((s32[1]), (s32[2], s32[3], (s32[4]))) parameter(0)
ROOT cond = pred[] constant(true)
}
ENTRY Loop {
a = s32[1] constant({0})
b = s32[2] constant({0,1})
c = s32[3] constant({0,1,2})
d = s32[4] constant({0,1,2,3})
ta = (s32[1]) tuple(a)
td = (s32[4]) tuple(d)
tbcd = (s32[2], s32[3], (s32[4])) tuple(b, c, td)
init = ((s32[1]), (s32[2], s32[3], (s32[4]))) tuple(ta, tbcd)
ROOT while = ((s32[1]), (s32[2], s32[3], (s32[4]))) while(init),
condition=Cond, body=Body
})";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
HloInstruction* new_while = FindFirstWhile(m.get());
Shape flat_tuple = ParseShape("(s32[1], s32[2], s32[3], s32[4])").value();
SCOPED_TRACE(m->ToString());
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(), flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
m->entry_computation()->root_instruction()->shape(),
ParseShape("((s32[1]), (s32[2], s32[3], (s32[4])))").value()));
}
TEST_F(WhileLoopSimplifierTest, OnlyConstantsInLoopCarry) {
const std::string hlo_string = R"(
HloModule Test
Body {
param = (s32[1]) parameter(0)
a = s32[1] constant({0})
ROOT tuple = (s32[1]) tuple(a)
}
Cond {
param = (s32[1]) parameter(0)
ROOT cond = pred[] constant(true)
}
ENTRY Loop {
a = s32[1] constant({0})
init = (s32[1]) tuple(a)
ROOT while = (s32[1]) while(init), condition=Cond, body=Body
})";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Constant()));
}
TEST_F(WhileLoopSimplifierTest, RemoveConstantFromLoopCarry) {
const std::string hlo_string = R"(
HloModule Test
Body {
param = (s32[1], s32[2], s32[3]) parameter(0)
a = s32[1] get-tuple-element(param), index=0
a.1 = s32[1] add(a, a)
b = s32[2] constant({1,1})
c = s32[3] constant({10,10,10})
ROOT tuple = (s32[1], s32[2], s32[3]) tuple(a.1, b, c)
}
Cond {
param = (s32[1], s32[2], s32[3]) parameter(0)
a = s32[1] get-tuple-element(param), index=0
b = s32[2] get-tuple-element(param), index=1
c = s32[3] get-tuple-element(param), index=2
ROOT cond = pred[] constant(true)
}
ENTRY Loop {
a = s32[1] constant({0})
b = s32[2] constant({1,1})
c = s32[3] constant({2,2,2})
init = (s32[1], s32[2], s32[3]) tuple(a,b,c)
ROOT while = (s32[1], s32[2], s32[3]) while(init),
condition=Cond, body=Body
})";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
HloInstruction* new_while = FindFirstWhile(m.get());
Shape new_while_shape = ParseShape("(s32[1], s32[3])").value();
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(
ShapeUtil::Equal(m->entry_computation()->root_instruction()->shape(),
ParseShape("(s32[1], s32[2], s32[3])").value()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(_, op::Constant(), _));
}
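// HLO template with two counter-like carried values (one incremented, one
// decremented); "TYPE" is a placeholder the tests substitute with a concrete
// element type.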
const char* const kSimpleMergeInductionVariablesModule = R"(
HloModule Test
Body {
param = (TYPE[], TYPE[], TYPE[]) parameter(0)
a = TYPE[] get-tuple-element(param), index=0
one = TYPE[] constant(1)
a1 = TYPE[] add(a, one)
b = TYPE[] get-tuple-element(param), index=1
negone = TYPE[] constant(-1)
b1 = TYPE[] add(b, negone)
c = TYPE[] add(a, b)
ROOT tuple = (TYPE[], TYPE[], TYPE[]) tuple(a1,b1,c)
}
Cond {
param = (TYPE[], TYPE[], TYPE[]) parameter(0)
a = TYPE[] get-tuple-element(param), index=0
b = TYPE[] get-tuple-element(param), index=1
sum = TYPE[] power(a, b)
ten = TYPE[] constant(10)
ROOT cond = pred[] compare(sum, ten), directi |
1,839 | cpp | tensorflow/tensorflow | hlo_parser | third_party/xla/xla/service/hlo_parser.cc | third_party/xla/xla/service/hlo_parser_test.cc | #ifndef XLA_SERVICE_HLO_PARSER_H_
#define XLA_SERVICE_HLO_PARSER_H_
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_lexer.h"
#include "xla/xla_data.pb.h"
namespace xla {
absl::StatusOr<std::unique_ptr<HloModule>> ParseAndReturnUnverifiedModule(
absl::string_view str, const HloModuleConfig& config);
absl::StatusOr<std::unique_ptr<HloModule>> ParseAndReturnUnverifiedModule(
absl::string_view str);
absl::StatusOr<HloSharding> ParseSharding(absl::string_view str);
absl::StatusOr<FrontendAttributes> ParseFrontendAttributes(
absl::string_view str);
absl::StatusOr<StatisticsViz> ParseStatisticsViz(absl::string_view str);
absl::StatusOr<std::vector<bool>> ParseParameterReplication(
absl::string_view str);
absl::StatusOr<Window> ParseWindow(absl::string_view str);
absl::StatusOr<ConvolutionDimensionNumbers> ParseConvolutionDimensionNumbers(
absl::string_view str);
absl::StatusOr<PaddingConfig> ParsePaddingConfig(absl::string_view str);
absl::StatusOr<Shape> ParseShape(absl::string_view str);
absl::StatusOr<Layout> ParseLayout(absl::string_view str);
absl::StatusOr<std::vector<ReplicaGroup>> ParseReplicaGroupsOnly(
absl::string_view str);
class HloParser {
public:
virtual absl::Status Run(HloModule* module) = 0;
virtual ~HloParser() {}
private:
static std::unique_ptr<HloParser> CreateHloParserForTests(
absl::string_view str);
friend class VerifiedHloModule;
};
}
#endif
#include "xla/service/hlo_parser.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_domain_metadata.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_lexer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/name_uniquer.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/gtl/map_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using std::nullopt;
using std::optional;
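// Builds an HloSchedule that simply follows each non-fusion computation's
// current instruction order.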
HloSchedule ScheduleFromInstructionOrder(HloModule* module) {
HloSchedule schedule(module);
for (HloComputation* computation : module->computations()) {
if (!computation->IsFusionComputation()) {
for (HloInstruction* instruction : computation->instructions()) {
schedule.GetOrCreateSequence(computation).push_back(instruction);
}
}
}
return schedule;
}
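// Returns true for opcodes whose result shape can be inferred from the
// operand shapes alone.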
bool CanInferShape(HloOpcode code) {
switch (code) {
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kAtan2:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBroadcast:
case HloOpcode::kCall:
case HloOpcode::kCeil:
case HloOpcode::kCholesky:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kConvolution:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kDivide:
case HloOpcode::kDomain:
case HloOpcode::kDot:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFft:
case HloOpcode::kFloor:
case HloOpcode::kGather:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kGetTupleElement:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kAnd:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kMap:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kPad:
case HloOpcode::kPartitionId:
case HloOpcode::kPopulationCount:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kReduce:
case HloOpcode::kRemainder:
case HloOpcode::kReplicaId:
case HloOpcode::kReverse:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kScatter:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kReduceWindow:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSort:
case HloOpcode::kSubtract:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kTranspose:
case HloOpcode::kTriangularSolve:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kTopK:
return true;
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kDynamicReshape:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReduceScatter:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kSlice:
case HloOpcode::kBitcast:
case HloOpcode::kBitcastConvert:
case HloOpcode::kConstant:
case HloOpcode::kConvert:
case HloOpcode::kCustomCall:
case HloOpcode::kFusion:
case HloOpcode::kInfeed:
case HloOpcode::kIota:
case HloOpcode::kOutfeed:
case HloOpcode::kParameter:
case HloOpcode::kReducePrecision:
case HloOpcode::kReshape:
case HloOpcode::kRng:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kStochasticConvert:
return false;
}
}
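// Concrete HloParser: consumes tokens from HloLexer, builds the module in
// place, and accumulates error messages that GetError() joins into one string.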
class HloParserImpl : public HloParser {
public:
using LocTy = HloLexer::LocTy;
using BoolList = absl::InlinedVector<bool, 1>;
explicit HloParserImpl(absl::string_view str) : lexer_(str) {}
absl::Status Run(HloModule* module) override;
std::string GetError() const { return StrJoin(error_, "\n"); }
absl::StatusOr<Shape> ParseShapeOnly();
absl::StatusOr<Layout> ParseLayoutOnly();
absl::StatusOr<HloSharding> ParseShardingOnly();
absl::StatusOr<FrontendAttributes> ParseFrontendAttributesOnly();
absl::StatusOr<StatisticsViz> ParseStatisticsVizOnly();
absl::StatusOr<std::vector<bool>> ParseParameterReplicationOnly();
absl::StatusOr<BoolList> ParseBooleanListOrSingleBooleanOnly();
absl::StatusOr<Window> ParseWindowOnly();
absl::StatusOr<ConvolutionDimensionNumbers>
ParseConvolutionDimensionNumbersOnly();
absl::StatusOr<PaddingConfig> ParsePaddingConfigOnly();
absl::StatusOr<std::vector<ReplicaGroup>> ParseReplicaGroupsOnly();
private:
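  // Kinds of attribute values the parser understands; each attribute to be
  // parsed is described by an AttrConfig entry (defined below).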
enum class AttrTy {
kBool,
kInt64,
kInt32,
kFloat,
kString,
kLiteral,
kBracedInt64List,
kBracedInt64ListList,
kHloComputation,
kBracedHloComputationList,
kFftType,
kPaddingType,
kComparisonDirection,
kComparisonType,
kWindow,
kConvolutionDimensionNumbers,
kSharding,
kFrontendAttributes,
kStatisticsViz,
kBracedBoolListOrBool,
kParameterReplication,
kInstructionList,
kSliceRanges,
kPaddingConfig,
kMetadata,
kFusionKind,
kDistribution,
kDomain,
kPrecisionList,
kShape,
kShapeList,
kEnum,
kRandomAlgorithm,
kPrecisionAlgorithm,
kAliasing,
kBufferDonor,
kComputationLayout,
kInstructionAliasing,
kCustomCallSchedule,
kCustomCallApiVersion,
kSparsityDescriptor,
kStringOrJsonDict,
};
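  // Description of a single attribute: whether it is required, what type it
  // has, and where to store the parsed value.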
struct AttrConfig {
bool required;
AttrTy attr_type;
void* result;
};
using InstrNameTable =
absl::flat_hash_map<std::string, std::pair<HloInstruction*, LocTy>>;
InstrNameTable& current_name_table() { return scoped_name_tables_.back(); }
std::pair<HloInstruction*, LocTy>* FindInstruction(
const std::string& name, const optional<Shape>& shape = nullopt);
bool ParseSingleInstruction(HloModule* module);
bool ParseHloModule(HloModule* module,
bool parse_module_without_header = false);
bool ParseComputations(HloModule* module);
bool ParseComputation(HloComputation** entry_computation);
bool ParseInstructionList(HloComputation** computation,
const std::string& computation_name);
bool ParseInstruction(HloComputation::Builder* builder,
std::string* root_name);
bool ParseInstructionRhs(HloComputation::Builder* builder, std::string name,
LocTy name_loc, bool allow_attributes = true);
bool ParseControlPredecessors(HloInstruction* instruction);
bool ParseLiteral(Literal* literal);
bool ParseLiteral(Literal* literal, const Shape& shape);
bool ParseTupleLiteral(Literal* literal, const Shape& shape);
bool ParseNonTupleLiteral(Literal* literal, const Shape& shape);
bool ParseDenseLiteral(Literal* literal, const Shape& shape);
HloInstruction* CreateInstruction(
HloComputation::Builder* builder, absl::string_view name,
std::optional<Shape> shape, HloOpcode opcode,
std::optional<HloOpcode> async_wrapped_opcode,
absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes,
std::vector<HloInstruction*>* preset_operands = nullptr);
bool SetValueInLiteral(LocTy loc, int64_t value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, double value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, bool value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, std::complex<double> value, int64_t index,
Literal* literal);
template <typename LiteralNativeT, typename ParsedElemT>
bool SetValueInLiteralHelper(LocTy loc, ParsedElemT value, int64_t index,
Literal* literal);
template <typename LiteralNativeT, typename ParsedElemT>
bool CheckParsedValueIsInRange(LocTy loc, ParsedElemT value);
template <typename LiteralNativeT>
bool CheckParsedValueIsInRange(LocTy loc, std::complex<double> value);
bool ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder);
bool ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder, int expected_size);
struct SliceRanges {
std::vector<int64_t> starts;
std::vector<int64_t> limits;
std::vector<int64_t> strides;
};
struct DomainData {
std::unique_ptr<DomainMetadata> entry_metadata;
std::unique_ptr<DomainMetadata> exit_metadata;
};
bool ParseAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes = true);
bool ParseSubAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs);
bool ParseAttributeHelper(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
absl::flat_hash_set<std::string>* seen_attrs);
bool CopyAttributeToProtoMessage(
absl::flat_hash_set<std::string> non_proto_attrs,
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
tsl::protobuf::Message* message);
bool ParseAttributesAsProtoMessage(
const absl::flat_hash_map<std::string, AttrConfig>& non_proto_attrs,
tsl::protobuf::Message* message);
bool ParseComputationName(HloComputation** value);
bool ParseInstructionNames(std::vector<HloInstruction*>* instructions);
bool ParseWindow(Window* window, bool expect_outer_curlies);
bool ParseConvolutionDimensionNumbers(ConvolutionDimensionNumbers* dnums);
bool ParsePaddingConfig(PaddingConfig* padding);
bool ParseMetadata(OpMetadata* metadata);
bool ParseSingleOrListMetadata(
tsl::protobuf::RepeatedPtrField<OpMetadata>* metadata);
bool ParseOpShardingType(OpSharding::Type* type);
bool ParseListShardingType(std::vector<OpSharding::Type>* types);
bool ParseSharding(OpSharding* sharding);
bool ParseFrontendAttributes(FrontendAttributes* frontend_attributes);
bool ParseStatisticsViz(StatisticsViz* statistics_viz);
bool ParseSingleSharding(OpSharding* sharding, bool lbrace_pre_lexed);
bool ParseParameterReplication(ParameterReplication* parameter_replication);
bool ParseBooleanListOrSingleBoolean(BoolList* boolean_list);
bool ParseReplicaGroupsOnly(std::vector<ReplicaGroup>* replica_groups);
bool ParseDomain(DomainData* domain);
bool ParseDxD(const std::string& name, std::vector<int64_t>* result);
bool ParseWindowPad(std::vector<std::vector<int64_t>>* pad);
bool ParseSliceRanges(SliceRanges* result);
bool ParsePrecisionList(std::vector<PrecisionConfig::Precision>* result);
bool ParseHloComputation(HloComputation** result);
bool ParseHloComputationList(std::vector<HloComputation*>* result);
bool ParseShapeList(std::vector<Shape>* result);
bool ParseInt64List(TokKind start, TokKind end, TokKind delim,
std::vector<int64_t>* result);
bool ParseInt64ListList(TokKind start, TokKind end, TokKind delim,
std::vector<std::vector<int64_t>>* result);
bool ParseList(TokKind start, TokKind end, TokKind delim,
absl::FunctionRef<bool()> parse_and_add_item);
bool ParseParamListToShape(Shape* shape, LocTy* shape_loc);
bool ParseParamList();
bool ParseName(std::string* result);
bool ParseAttributeName(std::string* result);
bool ParseString(std::string* result);
bool ParseJsonDict(std::string* result);
bool ParseDimensionSizes(std::vector<int64_t>* dimension_sizes,
std::vector<bool>* dynamic_dimensions);
bool ParseShape(Shape* result);
bool ParseLayout(Layout* layout);
bool ParseLayoutIntAttribute(int64_t* attr_value,
absl::string_view attr_description);
bool ParseDimLevelTypes(
absl::InlinedVector<DimLevelType, InlineRank()>* dim_level_types,
absl::InlinedVector<bool, InlineRank()>* dim_unique,
absl::InlinedVector<bool, InlineRank()>* dim_ordered);
bool ParseTiles(std::vector<Tile>* tiles);
bool ParseSplitConfigs(std::vector<SplitConfig>& split_configs);
bool ParsePhysicalShape(Shape* physical_shape);
bool ParseOpcode(HloOpcode* opcode,
std::optional<HloOpcode>* async_wrapped_opcode);
bool ParseFftType(FftType* result);
bool ParsePaddingType(PaddingType* result);
bool ParsePrimitiveType(PrimitiveType* result);
bool ParseComparisonDirection(ComparisonDirection* result);
bool ParseComparisonType(Comparison::Type* result);
bool ParseFusionKind(HloInstruction::FusionKind* result);
bool ParseRandomDistribution(RandomDistribution* result);
bool ParseRandomAlgorithm(RandomAlgorithm* result);
bool ParsePrecision(PrecisionConfig::Precision* result);
bool ParseAlgorithm(PrecisionConfig::Algorithm* result);
bool ParseInt64(int64_t* result);
bool ParseDouble(double* result);
bool ParseComplex(std::complex<double>* result);
bool ParseBool(bool* result);
bool ParseToken(TokKind kind, const std::string& msg);
bool ParseUnsignedIntegerType(PrimitiveType* primitive_type);
using AliasingData =
absl::flat_hash_map<ShapeIndex, HloInputOutputAliasConfig::Alias>;
using BufferDonor = absl::flat_hash_set<HloBufferDonorConfig::BufferDonor>;
bool ParseAliasing(AliasingData* data);
bool ParseBufferDonor(BufferDonor* data);
bool ParseComputationLayout(ComputationLayout* computation_layout);
bool ParseInstructionOutputOperandAliasing(
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>*
aliasing_output_operand_pairs);
bool ParseCustomCallSchedule(CustomCallSchedule* result);
bool ParseCustomCallApiVersion(CustomCallApiVersion* result);
bool ParseSparsityDescriptor(std::vector<SparsityDescriptor>* result);
bool ParseShapeIndex(ShapeIndex* out);
bool CanBeShape();
bool CanBeParamListToShape();
bool TokenError(absl::string_view msg);
bool Error(LocTy loc, absl::string_view msg);
bool EatIfPresent(TokKind kind);
bool AddInstruction(const std::string& name, HloInstruction* instruction,
LocTy name_loc);
bool AddComputation(const std::string& name, HloComputation* computation,
LocTy name_loc);
HloLexer lexer_;
std::vector<InstrNameTable> scoped_name_tables_;
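  // RAII helper that pushes a fresh instruction-name table when a nested
  // scope is entered and pops it again when the scope is left.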
class Scope {
public:
explicit Scope(std::vector<InstrNameTable>* scoped_name_tables)
: scoped_name_tables_(scoped_name_tables) {
scoped_name_tables_->emplace_back();
}
~Scope() { scoped_name_tables_->pop_back(); }
private:
std::vector<InstrNameTable>* scoped_name_tables_;
};
absl::flat_hash_map<std::string, std::pair<HloComputation*, LocTy>>
computation_pool_;
std::vector<std::unique_ptr<HloComputation>> computations_;
std::vector<std::string> error_;
std::function<std::pair<HloInstruction*, LocTy>*(const std::string& name,
const Shape& shape)>
create_missing_instruction_;
NameUniquer name_uniquer_{"."};
};
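// Splits `s` on `delim` and appends each piece, parsed as an int64_t, to
// `out`; returns false as soon as a piece is not a valid integer.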
bool SplitToInt64s(absl::string_view s, char delim, std::vector<int64_t>* out) {
for (const auto& split : absl::StrSplit(s, delim)) {
int64_t val;
if (!absl::SimpleAtoi(split, &val)) {
return false;
}
out->push_back(val);
}
return true;
}
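// Converts a list of replica-id lists into the corresponding ReplicaGroup
// protos.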
std::vector<ReplicaGroup> CreateReplicaGroups(
absl::Span<const std::vector<int64_t>> groups) {
std::vector<ReplicaGroup> replica_groups;
absl::c_transform(groups, std::back_inserter(replica_groups),
[](const std::vector<int64_t>& ids) {
ReplicaGroup group;
*group.mutable_replica_ids() = {ids.begin(), ids.end()};
return group;
});
return replica_groups;
}
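// Records an error message that includes the offending source line and a
// caret under the error column, then returns false so callers can bail out.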
bool HloParserImpl::Error(LocTy loc, absl::string_view msg) {
auto line_col = lexer_.GetLineAndColumn(loc);
const unsigned line = line_col.first;
const unsigned col = line_col.second;
std::vector<std::string> error_lines;
error_lines.push_back(
StrCat("was parsing ", line, ":", col, ": error: ", msg));
error_lines.emplace_back(lexer_.GetLine(loc));
error_lines.push_back(col == 0 ? "" : StrCat(std::string(col - 1, ' '), "^"));
error_.push_back(StrJoin(error_lines, "\n"));
VLOG(1) << "Error: " << error_.back();
return false;
}
bool HloParserImpl::TokenError(absl::string_view msg) {
return Error(lexer_.GetLoc(), msg);
}
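// Top-level entry point: text that begins with `HloModule`, `ENTRY`, or a
// token followed by `{` is parsed as a module; anything else is parsed as a
// single instruction.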
absl::Status HloParserImpl::Run(HloModule* module) {
lexer_.Lex();
if ((lexer_.GetKind() == TokKind::kw_HloModule) ||
(lexer_.GetKind() == TokKind::kw_ENTRY) ||
(lexer_.LookAhead() == TokKind::kLbrace)) {
    bool parse_module_without_header =
        lexer_.GetKind() != TokKind::kw_HloModule;
if (!ParseHloModule(module, parse_module_without_header)) {
return InvalidArgument(
"Syntax error when trying to parse the text as a HloModule:\n%s",
GetError());
}
return absl::OkStatus();
}
if (!ParseSingleInstruction(module)) {
return InvalidArgument(
"Syntax error when trying to parse the text as a single "
"HloInstruction:\n%s",
GetError());
}
return absl::OkStatus();
}
std::pair<HloInstruction*, HloParserImpl::LocTy>*
HloParserImpl::FindInstruction(const std::string& name,
const optional<Shape>& shape) {
std::pair<HloInstruction*, LocTy>* instr = nullptr;
if (!name.empty()) {
instr = tsl::gtl::FindOrNull(current_name_table(), name);
} | #include "xla/service/hlo_parser.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_frontend_attributes.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::absl::string_view;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
struct TestData {
std::string test_name;
std::string module_string;
int64_t replica_count = 1;
bool enable_verification = true;
};
std::string TestDataToString(const ::testing::TestParamInfo<TestData>& data) {
return data.param.test_name;
}
struct NonRoundtripTestData {
std::string test_name;
std::string input_module_string;
std::string output_module_string;
};
std::string NonRoundtripTestDataToString(
const ::testing::TestParamInfo<NonRoundtripTestData>& data) {
return data.param.test_name;
}
std::vector<TestData> CreateTestCases() {
return std::vector<TestData>({
{
"AxpyParam",
R"(HloModule axpy_module, entry_computation_layout={(f32[], f32[2,4]{1,0}, f32[2,4]{1,0})->f32[2,4]{1,0}}
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)"
},
{
"ParamReplication",
R"(HloModule param_replication_module, entry_computation_layout={(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))->(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))}
ENTRY %param_replication (a: f32[], b: (f32[2,4], (f32[2,4]))) -> (f32[], (f32[2,4], (f32[2,4]))) {
%a = f32[] parameter(0), parameter_replication={true}
%b = (f32[2,4]{1,0}, (f32[2,4]{1,0})) parameter(1), parameter_replication={false,true}
ROOT %tuple = (f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0}))) tuple(f32[] %a, (f32[2,4]{1,0}, (f32[2,4]{1,0})) %b)
}
)"
},
{
"ConstantPred",
R"(HloModule constant_pred_module, entry_computation_layout={()->pred[]}
ENTRY %constant_pred () -> pred[] {
ROOT %constant = pred[] constant(true), metadata={op_type="const" op_name="\"it\'s not a problem\n" source_file="path/to/test.cc" source_line=68}, backend_config="foo\" bar"
}
)"
},
{
"ConstantPredArray",
R"(HloModule module, entry_computation_layout={()->pred[2,3]{1,0}}
ENTRY %constant_pred_array () -> pred[2,3] {
ROOT %constant = pred[2,3]{1,0} constant({ { 0, 1, 0 }, { 1, 0, 1 } })
}
)"
},
{
"ConstantS32",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42)
}
)"
},
{
"ConstantS32WithStatistics",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42), statistics={visualizing_index=1,stat-1=33,stat-2=44}
}
)"
},
{
"ConstantF32",
R"(HloModule ConstantF32_module, entry_computation_layout={()->f32[]}
ENTRY %ConstantF32.v4 () -> f32[] {
ROOT %constant = f32[] constant(42), backend_config="this is a configuration"
}
)"
},
{
"ConstantF32R1Empty",
R"(HloModule ConstantF32Empty_module, entry_computation_layout={()->f32[0]{0}}
ENTRY %ConstantF32Empty.v4 () -> f32[0] {
ROOT %constant = f32[0]{0} constant({})
}
)"
},
{
"ConstantF32R4Empty",
R"(HloModule ConstantF32R4Empty_module, entry_computation_layout={()->f32[2,0,4,3]{3,2,1,0}}
ENTRY %ConstantF32R4Empty.v4 () -> f32[2,0,4,3] {
ROOT %constant = f32[2,0,4,3]{3,2,1,0} constant({ { }, { } })
}
)"
},
{
"Constant4D",
R"(HloModule Small_3x2x1x1_module, entry_computation_layout={()->f32[3,2,1,1]{3,2,1,0}}
ENTRY %Small_3x2x1x1.v1 () -> f32[3,2,1,1] {
ROOT %constant = f32[3,2,1,1]{3,2,1,0} constant({ { { {-1} }, { {4.1} } }, { { {2} }, { {4.1} } }, { { {5} }, { {4.4} } } })
}
)"
},
{
"ConstantNonFinite",
R"(HloModule IsFiniteR1F32s_module, entry_computation_layout={()->pred[6]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> pred[6] {
%constant = f32[6]{0} constant({nan, 7, nan, -1, inf, -inf})
ROOT %is-finite = pred[6]{0} is-finite(f32[6]{0} %constant)
}
)"
},
{
"ConstantNonFiniteE4M3",
R"(HloModule ConstantR1F8E4M3FNs_module, entry_computation_layout={()->f8e4m3fn[3]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3fn[3] {
ROOT %constant = f8e4m3fn[3]{0} constant({nan, 7, -nan})
}
)"
},
{
"ConstantNonFiniteE4M3B11",
R"(HloModule ConstantR1F8E4M3B11_module, entry_computation_layout={()->f8e4m3b11fnuz[2]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3b11fnuz[2] {
ROOT %constant = f8e4m3b11fnuz[2]{0} constant({-nan, 7})
}
)"
},
{
"ConstantF16",
R"(HloModule ConstantF16_module, entry_computation_layout={()->f16[]}
ENTRY %ConstantF16.v4 () -> f16[] {
ROOT %constant = f16[] constant(500)
}
)"
},
{
"BF16",
R"(HloModule BF16, entry_computation_layout={()->bf16[]}
ENTRY %BF16.v4 () -> bf16[] {
ROOT %constant = bf16[] constant(500)
}
)"
},
{
"AddConstants",
R"(HloModule add_constants_module, entry_computation_layout={()->f32[]}
ENTRY %add_constants () -> f32[] {
%constant = f32[] constant(3.14)
ROOT %add = f32[] add(f32[] %constant, f32[] %constant)
}
)"
},
{
"TupleConstant",
R"(HloModule TupleConstant_module, entry_computation_layout={()->(f32[2,1]{1,0}, f32[2]{0})}
ENTRY %TupleConstant.v1 () -> (f32[2,1], f32[2]) {
ROOT %constant = (f32[2,1]{1,0}, f32[2]{0}) constant(( { {1}, {2} }, {2, 42} ))
}
)"
},
{
"SelectR1F32",
R"(HloModule SelectR1F32WithCmpR1F32sFromParamsSmall_module, entry_computation_layout={(f32[4]{0}, f32[4]{0})->f32[4]{0}}
ENTRY %SelectR1F32WithCmpR1F32sFromParamsSmall.v4 (v1: f32[4], v2: f32[4]) -> f32[4] {
%v1 = f32[4]{0} parameter(0), sharding={maximal device=1}
%v2 = f32[4]{0} parameter(1), sharding={maximal device=1}
%greater-than = pred[4]{0} compare(f32[4]{0} %v1, f32[4]{0} %v2), direction=GT, type=TOTALORDER, sharding={replicated}
ROOT %select = f32[4]{0} select(pred[4]{0} %greater-than, f32[4]{0} %v1, f32[4]{0} %v2), sharding={replicated}
}
)"
},
{
"EmptyTupleCreate",
R"(HloModule EmptyTupleCreate_module, entry_computation_layout={()->()}
ENTRY %EmptyTupleCreate.v1 () -> () {
ROOT %tuple = () tuple()
}
)"
},
{
"TupleCreate",
R"(HloModule TupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)"
},
{
"LargeTupleRoundTrip",
R"(HloModule LargeTupleRoundTrip_module, entry_computation_layout={(f32[])->(f32[], f32[], f32[], f32[], f32[], f32[])}
ENTRY %TupleCreate.v4 (v: f32[]) -> (f32[], f32[], f32[], f32[], f32[], f32[]) {
%v = f32[] parameter(0)
ROOT %tuple = (f32[], f32[], f32[], f32[], f32[], f32[]) tuple(f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v)
}
)"
},
{
"ShardedTupleCreate",
R"(HloModule ShardedTupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %ShardedTupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0), sharding={manual}
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3), sharding={{manual}, {maximal device=0}, {replicated}}
}
)"
},
{
"DomainParsing",
R"(HloModule DomainParsing_module, entry_computation_layout={(f32[])->f32[]}
ENTRY %DomainParsing (v1: f32[]) -> f32[] {
%v1 = f32[] parameter(0)
ROOT %dom = f32[] domain(f32[] %v1), domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
}
)"
},
{
"WhileWithScalarS32Result",
R"(HloModule WhileWithScalarS32Result_module, entry_computation_layout={()->s32[]}
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)"
},
{
"CopyStartAndCopyDone",
R"(HloModule CopyStartAndCopyDone_module, entry_computation_layout={(f32[], f32[2,3]{1,0:S(1)})->(f32[], f32[2,3]{1,0:S(2)})}
ENTRY %CopyStartAndCopyDone (v1: f32[], v2: f32[2,3]) -> (f32[], f32[2,3]) {
%v1 = f32[] parameter(0)
%copy-start.1 = (f32[], f32[], u32[]) copy-start(f32[] %v1), cross_program_prefetch_index=0
%copy-done.1 = f32[] copy-done((f32[], f32[], u32[]) %copy-start.1)
%v2 = f32[2,3]{1,0:S(1)} parameter(1)
%copy-start.2 = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(f32[2,3]{1,0:S(1)} %v2)
%copy-done.2 = f32[2,3]{1,0:S(2)} copy-done((f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) %copy-start.2)
ROOT %tuple = (f32[], f32[2,3]{1,0:S(2)}) tuple(f32[] %copy-done.1, f32[2,3]{1,0:S(2)} %copy-done.2)
}
)"
},
{
"SendRecv",
R"(HloModule TwoSendRecvBothWayRecvFist_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, sharding={{maximal device=1}, {replicated}, {replicated}}
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, sharding={{maximal device=1}, {replicated}}
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, sharding={{maximal device=1}, {replicated}, {replicated}}, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, sharding={maximal device=0}
}
)"
},
{
"SendRecvWithHostTransfer",
R"(HloModule HostTransferSendRecv_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, is_host_transfer=true
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, is_host_transfer=true
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, is_host_transfer=true
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, is_host_transfer=true
}
)"
},
{
"GetTupleElement",
R"(HloModule GetTupleElement_module, entry_computation_layout={()->s32[2,3]{1,0}}
ENTRY %GetTupleElement.v4 () -> s32[2,3] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
%tuple = (f32[3]{0}, s32[2,3]{1,0}) tuple(f32[3]{0} %constant, s32[2,3]{1,0} %constant.1)
ROOT %get-tuple-element = s32[2,3]{1,0} get-tuple-element((f32[3]{0}, s32[2,3]{1,0}) %tuple), index=1, sharding={maximal device=0}
}
)"
},
{
"Call",
R"(HloModule CallR0F32IdentityScalar_module, entry_computation_layout={()->f32[]}
%Identity.v1 (x: f32[]) -> f32[] {
ROOT %x = f32[] parameter(0)
}
ENTRY %CallR0F32IdentityScalar.v2 () -> f32[] {
%constant = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant), to_apply=%Identity.v1
}
)"
},
{
"CustomCallWithOpaque",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config="this string is opaque"
}
)"
},
{
"CustomCallWithBackendConfigInCurlyBraces",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config={key: "value"}
}
)"
},
{
"CustomCallWithLiteral",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=s32[2]{0} {1, 2}
}
)"
},
{
"CustomCallWithLiteralTuple",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=( s32[4]{0} {4, 128, 128, 3}, pred[4]{0} {1, 0, 0, 0} )
}
)"
},
{
"CustomCallWithLiteralR0",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=f32[] 0.1
}
)"
},
{
"ReduceWindow",
R"(HloModule R4UnitWindow_module, entry_computation_layout={(f32[13,12,8,15]{0,3,2,1})->f32[13,3,8,15]{0,3,2,1}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindow.v3 (operand: f32[13,12,8,15]) -> f32[13,3,8,15] {
%operand = f32[13,12,8,15]{0,3,2,1} parameter(0)
%constant = f32[] constant(0)
ROOT %reduce-window = f32[13,3,8,15]{0,3,2,1} reduce-window(f32[13,12,8,15]{0,3,2,1} %operand, f32[] %constant), window={size=1x1x7x1 stride=1x4x1x1 pad=0_0x0_0x3_3x0_0}, to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowScalar",
R"(HloModule reduce_window_scalar, entry_computation_layout={()->f32[]}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindowScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = f32[] reduce-window(f32[] %constant, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowVariadic",
R"(HloModule reduce_window_variadic, entry_computation_layout={()->(f32[], f32[])}
%add_F32.v3 (lhs1: f32[], lhs2: f32[], rhs1: f32[], rhs2: f32[]) -> (f32[], f32[]) {
%lhs1 = f32[] parameter(0)
%rhs1 = f32[] parameter(2)
%add1 = f32[] add(f32[] %lhs1, f32[] %rhs1)
%lhs2 = f32[] parameter(1)
%rhs2 = f32[] parameter(3)
%add2 = f32[] add(f32[] %lhs2, f32[] %rhs2)
ROOT %tuple1 = (f32[], f32[]) tuple(f32[] %add1, f32[] %add2)
}
ENTRY %R4UnitWindowScalar () -> (f32[], f32[]) {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = (f32[], f32[]) reduce-window(f32[] %constant, f32[] %constant, f32[] %constant.1, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"Convolution",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}
}
)"
},
{
"ConvolutionDynamic",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %custom-call.52 = f32[1,2,1]{2,0,1} custom-call(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}, custom_call_target="DynamicConvolutionForward", metadata={op_type="Conv2D" op_name="conv1d"}
}
)"
},
{
"ConvolutionR2",
R"(HloModule ConvolveR2_module, entry_computation_layout={(f32[1,2]{1,0}, f32[2,2]{1,0})->f32[1,2]{0,1}}
ENTRY %ConvolveR2.v3 (input: f32[1,2], filter: f32[2,2]) -> f32[1,2] {
%input = f32[1,2]{1,0} parameter(0)
%filter = f32[2,2]{1,0} parameter(1)
ROOT %convolution = f32[1,2]{0,1} convolution(f32[1,2]{1,0} %input, f32[2,2]{1,0} %filter), dim_labels=bf_io->bf
}
)"
},
{
"ConvolutionBackward",
R"(HloModule ConvolveBackward_module, entry_computation_layout={(f32[128,7,7,512]{0,3,2,1}, f32[3,3,512,512]{3,2,1,0})->f32[128,14,14,512]{0,3,2,1}}
ENTRY %ConvolveBackward (input: f32[128,7,7,512], filter: f32[3,3,512,512]) -> f32[128,14,14,512] {
%input = f32[128,7,7,512]{0,3,2,1} parameter(0)
%filter = f32[3,3,512,512]{3,2,1,0} parameter(1)
ROOT %convolution-base-dilated = f32[128,14,14,512]{0,3,2,1} convolution(f32[128,7,7,512]{0,3,2,1} %input, f32[3,3,512,512]{3,2,1,0} %filter), window={size=3x3 pad=1_2x1_2 lhs_dilate=2x2 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
}
)"
},
{
"Reverse4D",
R"(HloModule Reverse4DFloatArrayOnDim01_module, entry_computation_layout={()->f32[4,3,2,1]{0,1,2,3}}
ENTRY %Reverse4DFloatArrayOnDim01.v2 () -> f32[4,3,2,1] {
%constant = f32[4,3,2,1]{0,1,2,3} constant({ { { {1}, {2} }, { {3}, {4} }, { {5}, {6} } }, { { {7}, {8} }, { {9}, {10} }, { {11}, {12} } }, { { {13}, {14} }, { {15}, {16} }, { {17}, {18} } }, { { {19}, {20} }, { {21}, {22} }, { {23}, {24} } } })
ROOT %reverse = f32[4,3,2,1]{0,1,2,3} reverse(f32[4,3,2,1]{0,1,2,3} %constant), dimensions={0,1}
}
)"
},
{
"Concat",
R"(HloModule Concat2x3With2x5_module, entry_computation_layout={()->f32[2,8]{1,0}}
ENTRY %Concat2x3With2x5.v3 () -> f32[2,8] {
%constant = f32[2,3]{1,0} constant({ { 0, 1, 2 }, { 1000, 1001, 1002 } })
%constant.1 = f32[2,5]{1,0} constant({ { 64, 65, 66, 67, 68 }, { 1064, 1065, 1066, 1067, 1068 } })
ROOT %concatenate = f32[2,8]{1,0} concatenate(f32[2,3]{1,0} %constant, f32[2,5]{1,0} %constant.1), dimensions={1}
}
)"
},
{
"SelectAndScatter",
R"(HloModule R4F32OverlapSmall_module, entry_computation_layout={()->f32[4,5,1,1]{3,2,1,0}}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE, type=TOTALORDER
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %R4F32OverlapSmall.v4 () -> f32[4,5,1,1] {
%constant = f32[4,5,1,1]{3,2,1,0} constant({ { { {7} }, { {2} }, { {5} }, { {3} }, { {8} } }, { { {3} }, { {8} }, { {9} }, { {3} }, { {4} } }, { { {1} }, { {5} }, { {7} }, { {5} }, { {6} } }, { { {0} }, { {6} }, { {2} }, { {10} }, { {2} } } })
%constant.1 = f32[2,2,1,1]{3,2,1,0} constant({ { { {2} }, { {6} } }, { { {3} }, { {1} } } })
%constant.2 = f32[] constant(0)
ROOT %select-and-scatter = f32[4,5,1,1]{3,2,1,0} select-and-scatter(f32[4,5,1,1]{3,2,1,0} %constant, f32[2,2,1,1]{3,2,1,0} %constant.1, f32[] %constant.2), window={size=2x3x1x1 stride=2x2x1x1}, select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"SelectAndScatterScalar",
R"(HloModule select_and_scatter_scalar, entry_computation_layout={()->f32[]}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %SelectAndScatterScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
ROOT %select-and-scatter = f32[] select-and-scatter(f32[] %constant, f32[] %constant.1, f32[] %constant.2), select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"Slice",
R"(HloModule slice_module, entry_computation_layout={(f32[3,3,4,4]{3,2,1,0})->f32[3,3,2,4]{3,2,1,0}}
ENTRY %slice.v2 (p0: f32[3,3,4,4]) -> f32[3,3,2,4] {
%p0 = f32[3,3,4,4]{3,2,1,0} parameter(0)
ROOT %slice = f32[3,3,2,4]{3,2,1,0} slice(f32[3,3,4,4]{3,2,1,0} %p0), slice={[0:3:1], [0:3:1], [0:4:2], [0:4:1]}
}
)"
},
{
"SliceNoStride",
R"(HloModule Slice3x3x3_To_1x3x3_F32_module, entry_computation_layout={()->f32[1,3,3]{2,1,0}}
ENTRY %Slice3x3x3_To_1x3x3_F32.v2 () -> f32[1,3,3] {
%constant = f32[3,3,3]{2,1,0} constant({ { { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 } }, { { 9, 10, 11 }, { 12, 13, 14 }, { 15, 16, 17 } }, { { 18, 19, 20 }, { 21, 22, 23 }, { 24, 25, 26 } } })
ROOT %slice = f32[1,3,3]{2,1,0} slice(f32[3,3,3]{2,1,0} %constant), slice={[0:1], [0:3], [0:3]}
}
)"
},
{
"SliceR0",
R"(HloModule SliceR0_module, entry_computation_layout={()->s32[]}
ENTRY %SliceR0.v2 () -> s32[] {
%constant = s32[] constant(1)
ROOT %slice = s32[] slice(s32[] %constant), slice={}
}
)"
},
{
"Transpose",
R"(HloModule Transpose_module, entry_computation_layout={()->s32[1,2,3]{2,1,0}}
ENTRY %Transpose.v2 () -> s32[1,2,3] {
%constant = s32[1,2,3]{2,1,0} constant({ { { 1, 2, 3 }, { 4, 5, 6 } } })
ROOT %transpose = s32[1,2,3]{2,1,0} transpose(s32[1,2,3]{2,1,0} %constant), dimensions={0,1,2}
}
)"
},
{
"TransposeC128",
R"(HloModule TransposeC128_module, entry_computation_layout={(c128[1,2,3]{2,1,0})->c128[1,2,3]{2,1,0}}
ENTRY %Transpose.v3 (input: c128[1,2,3]) -> c128[1,2,3] {
%input = c128[1,2,3]{2,1,0} parameter(0)
ROOT %transpose = c128[1,2,3]{2,1,0} transpose(c128[1,2,3]{2,1,0} %input), dimensions={0,1,2}
}
)"
},
{
"TriangularSolve",
R"(HloModule TriangularSolve_module, entry_computation_layout={(f32[4,4]{1,0}, f32[3,4]{1,0})->f32[3,4]{1,0}}
ENTRY %SimpleRightLowerNotranspose.4 (a.1: f32[4,4], b.2: f32[3,4]) -> f32[3,4] {
%a.1 = f32[4,4]{1,0} parameter(0)
%b.2 = f32[3,4]{1,0} parameter(1)
ROOT %triangular-solve.3 = f32[3,4]{1,0} triangular-solve(f32[4,4]{1,0} %a.1, f32[3,4]{1,0} %b.2), lower=true, transpose_a=NO_TRANSPOSE
}
)"
},
{
"DynamicSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[1]{0})->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[1]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[1]{0} constant({0})
%start_index = s32[1]{0} parameter(1)
%concatenate = s32[3]{0} concatenate(s32[1]{0} %constant, s32[1]{0} %constant, s32[1]{0} %start_index), dimensions={0}
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[3]{0} %concatenate), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicSliceScalarIndices",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[])->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[] constant(0)
%start_index = s32[] parameter(1)
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[] %constant, s32[] %constant, s32[] %start_index), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicUpdateSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[4]{0})->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_indices: s32[4]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_indices = s32[4]{0} parameter(2)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[4]{0} %start_indices)
}
)"
},
{
"DynamicUpdateSliceScalarIndex",
R"(HloModule DynamicUpdateSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[], s32[], s32[], s32[])->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_index.0: s32[], start_index.1: s32[], start_index.2: s32[], start_index.3: s32[]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_index.0 = s32[] parameter(2)
%start_index.1 = s32[] parameter(3)
%start_index.2 = s32[] parameter(4)
%start_index.3 = s32[] parameter(5)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[] %start_index.0, s32[] %start_index.1, s32[] %start_index.2, s32[] %start_index.3)
}
)"
},
{
"BatchNormTraining",
R"(HloModule BasicTraining_module, entry_computation_layout={()->(f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BasicTraining.v4 () -> (f32[2,2,1,2], f32[2], f32[2]) {
%constant = f32[2,2,1,2]{3,2,1,0} constant({ { { { 1, 2 } }, { { 3, 4 } } }, { { { 5, 6 } }, { { 7, 8 } } } })
%constant.1 = f32[2]{0} constant({2, 3})
%constant.2 = f32[2]{0} constant({1, 2})
ROOT %batch-norm-training = (f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-training(f32[2,2,1,2]{3,2,1,0} %constant, f32[2]{0} %constant.1, f32[2]{0} %constant.2), epsilon=0.001, feature_index=3
}
)"
},
{
"BatchNormInference",
R"(HloModule BatchNormInference_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0})->f32[2,2,2,2]{3,2,1,0}}
ENTRY %BatchNormInference.v6 (input: f32[2,2,2,2], offset: f32[2], scale: f32[2], mean: f32[2], variance: f32[2]) -> f32[2,2,2,2] {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%offset = f32[2]{0} parameter(1)
%scale = f32[2]{0} parameter(2)
%mean = f32[2]{0} parameter(3)
%variance = f32[2]{0} parameter(4)
ROOT %batch-norm-inference = f32[2,2,2,2]{3,2,1,0} batch-norm-inference(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %offset, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance), epsilon=0.001, feature_index=0
}
)"
},
{
"BatchNormGrad",
R"(HloModule BatchNormGrad_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,2,2,2]{3,2,1,0})->(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BatchNormGrad.v4 (input: f32[2,2,2,2], scale: f32[2], mean: f32[2], variance: f32[2], grad_output: f32[2,2,2,2]) -> (f32[2,2,2,2], f32[2], f32[2]) {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%scale = f32[2]{0} parameter(1)
%mean = f32[2]{0} parameter(2)
%variance = f32[2]{0} parameter(3)
%grad_output = f32[2,2,2,2]{3,2,1,0} parameter(4)
ROOT %batch-norm-grad = (f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-grad(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance, f32[2,2,2,2]{3,2,1,0} %grad_output), epsilon=0.001, feature_index=0
}
)"
},
{
"Fft",
R"(HloModule Fft_module, entry_computation_layout={(c64[8,32]{1,0})->c64[8,32]{1,0}}
ENTRY %Fft (input: c64[8,32]) -> c64[8,32] {
%input = c64[8,32]{1,0} parameter(0)
ROOT %fft = c64[8,32]{1,0} fft(c64[8,32]{1,0} %input), fft_type=FFT, fft_length={32}
}
)"
},
{
"Ifft2d",
R"(HloModule Ifft2d_module, entry_computation_layout={(c64[5,8,32]{2,1,0})->c64[5,8,32]{2,1,0}}
ENTRY %Ifft2d (input: c64[5,8,32]) -> c64[5,8,32] {
%input = c64[5,8,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,8,32]{2,1,0} fft(c64[5,8,32]{2,1,0} %input), fft_type=IFFT, fft_length={8,32}
}
)"
},
{
"Rfft2d",
R"(HloModule Rfft2d_module, entry_computation_layout={(f32[5,64,32]{2,1,0})->c64[5,64,17]{2,1,0}}
ENTRY %Rfft2d (input: f32[5,64,32]) -> c64[5,64,17] {
%input = f32[5,64,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,64,17]{2,1,0} fft(f32[5,64,32]{2,1,0} %input), fft_type=RFFT, fft_length={64,32}
}
)"
},
{
"Irfft3d",
R"(HloModule Irfft3d_module, entry_computation_layout={(c64[5,64,128,33]{3,2,1,0})->f32[5,64,128,64]{3,2,1,0}}
ENTRY %Irfft3d (input: c64[5,64,128,33]) -> f32[5,64,128,64] {
%input = c64[5,64,128,33]{3,2,1,0} parameter(0)
ROOT %fft = f32[5,64,128,64]{3,2,1,0} fft(c64[5,64,128,33]{3,2,1,0} %input), fft_type=IRFFT, fft_length={64,128,64}
}
)"
},
{
"Pad",
R"(HloModule Pad1DS3Array_module, entry_computation_layout={()->f32[7]{0}}
ENTRY %Pad1DS3Array.v3 () -> f32[7] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = f32[] constant(0.1)
ROOT %pad = f32[7]{0} pad(f32[3]{0} %constant, f32[] %constant.1), padding=3_1
}
)"
},
{
"PadHasInterior",
R"(HloModule PadHasInterior_module, entry_computation_layout={(f32[1,25,7,7]{3,2,1,0})->f32[1,2 |
1,840 | cpp | tensorflow/tensorflow | all_gather_combiner | third_party/xla/xla/service/all_gather_combiner.cc | third_party/xla/xla/service/all_gather_combiner_test.cc | #ifndef XLA_SERVICE_ALL_GATHER_COMBINER_H_
#define XLA_SERVICE_ALL_GATHER_COMBINER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/xla_data.pb.h"
namespace xla {
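// HLO pass that merges independent, compatible all-gather ops into a single
// tuple-shaped all-gather, subject to the given byte-size and op-count
// thresholds. When `combine_by_dim` is false, all-gathers over different
// dimensions can still be merged by bitcasting operands to a common gather
// dimension.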
class AllGatherCombiner : public HloModulePass {
public:
AllGatherCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count, bool combine_by_dim);
absl::string_view name() const override { return "all-gather-combiner"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
int64_t combine_threshold_in_bytes_;
int64_t combine_threshold_count_;
bool combine_by_dim_;
};
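// Illustrative usage sketch (not part of this header): the pass is typically
// registered on an HloPassPipeline, mirroring the constructor above. The
// pipeline name and threshold values here are made up for illustration:
//   HloPassPipeline pipeline("collective-combiners");
//   pipeline.AddPass<AllGatherCombiner>(/*combine_threshold_in_bytes=*/1024 * 1024,
//                                       /*combine_threshold_count=*/256,
//                                       /*combine_by_dim=*/true);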
}
#endif
#include "xla/service/all_gather_combiner.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
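// Returns the all-gather dimension used most often across `to_combine`,
// falling back to 0 when that dimension is out of range for the lowest-rank
// op in the set.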
int64_t FindMostFrequentGatherDim(
absl::Span<HloInstruction* const> to_combine) {
assert(!to_combine.empty());
int64_t min_rank = std::numeric_limits<int64_t>::max();
std::vector<int64_t> frequency;
for (const HloInstruction* it : to_combine) {
int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension();
frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())),
0);
frequency[dim]++;
min_rank = std::min(min_rank, it->shape().rank());
}
int64_t most_frequent_dim = std::distance(
frequency.begin(), std::max_element(frequency.begin(), frequency.end()));
return most_frequent_dim < min_rank ? most_frequent_dim : 0;
}
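// Replaces the all-gathers in `to_combine` with one tuple-shaped all-gather
// over all of their operands. Ops gathering along a different dimension are
// bitcast to the most frequent dimension first, and every original result is
// rewritten as a get-tuple-element (plus an inverse bitcast where needed).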
absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine,
bool combine_by_dim) {
if (to_combine.size() < 2) {
return absl::OkStatus();
}
VLOG(1) << "Combined " << to_combine.size() << " AllGather ops";
HloComputation& computation = *to_combine.back()->parent();
std::vector<HloInstruction*> operands;
std::vector<std::optional<std::vector<int64_t>>> operand_permutations;
std::vector<Shape> output_shapes;
int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine);
VLOG(1) << "Combining set";
for (HloInstruction* hlo : to_combine) {
VLOG(1) << "Set element: " << hlo->ToString();
TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather);
const auto* ag = Cast<HloAllGatherInstruction>(hlo);
TF_RET_CHECK(hlo->operand_count() == 1);
TF_RET_CHECK(hlo->shape().IsArray());
TF_RET_CHECK(!combine_by_dim ||
ag->all_gather_dimension() == most_frequent_dim);
HloInstruction* operand = hlo->operands().front();
operands.push_back(operand);
operand_permutations.emplace_back();
output_shapes.push_back(hlo->shape());
if (ag->all_gather_dimension() != most_frequent_dim) {
const Shape& operand_shape = operand->shape();
auto& perm = operand_permutations.back();
perm = std::vector<int64_t>(operand_shape.rank());
std::iota(perm->begin(), perm->end(), 0);
std::swap((*perm)[most_frequent_dim],
(*perm)[ag->all_gather_dimension()]);
operands.back() =
computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*perm, operand_shape), operand));
output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape());
}
}
  HloInstruction* combined =
      computation.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim,
to_combine.front()->device_list(),
false, to_combine.front()->channel_id(),
Cast<HloAllGatherInstruction>(to_combine.front())
->use_global_device_ids()));
combined->set_sharding(
hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine));
VLOG(1) << "Replacing with : " << combined->ToString();
for (int64_t i = 0; i < to_combine.size(); ++i) {
HloInstruction* replacement = computation.AddInstruction(
HloInstruction::CreateGetTupleElement(combined, i));
if (operand_permutations[i]) {
replacement = computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*operand_permutations[i],
replacement->shape()),
replacement));
}
TF_RETURN_IF_ERROR(
computation.ReplaceInstruction(to_combine[i], replacement));
}
return absl::OkStatus();
}
using GroupKey = std::tuple<std::optional<int64_t>, int64_t, bool, bool,
std::vector<std::vector<int64_t>>>;
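// Builds the grouping key that decides which all-gathers may be combined: the
// gather dimension (or -1 when not combining by dimension), the domain
// metadata id, whether a channel id is present, use_global_device_ids, and
// the replica groups. Returns nullopt for non-all-gather instructions.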
std::optional<GroupKey> CombineKey(const HloInstruction* instruction,
const HloDomainMap& domain_map,
bool combine_by_dim) {
if (instruction->opcode() != HloOpcode::kAllGather) {
return std::nullopt;
}
std::vector<std::vector<int64_t>> replica_groups;
const auto* ag = Cast<HloAllGatherInstruction>(instruction);
replica_groups.reserve(ag->replica_groups().size());
for (const ReplicaGroup& replica_group : ag->replica_groups()) {
replica_groups.push_back(
std::vector<int64_t>(replica_group.replica_ids().begin(),
replica_group.replica_ids().end()));
}
int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1;
return GroupKey{ag_dim_key, domain_map.GetDomainMetadataId(ag),
ag->channel_id().has_value(), ag->use_global_device_ids(),
replica_groups};
}
}
AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count,
bool combine_by_dim)
: combine_threshold_in_bytes_(combine_threshold_in_bytes),
combine_threshold_count_(combine_threshold_count),
combine_by_dim_(combine_by_dim) {}
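// Runs the combiner: within every non-fusion computation, all-gathers are
// grouped by CombineKey and each group is merged subject to the byte and
// count thresholds.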
absl::StatusOr<bool> AllGatherCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllGatherCombiner with threshold of "
<< combine_threshold_in_bytes_ << " bytes";
if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
VLOG(1) << "Skip AllGatherCombiner because the threshold is zero";
return false;
}
if (hlo_query::ContainsLayoutConstrainedCollective(*module,
HloOpcode::kAllGather)) {
VLOG(1) << "Skip AllGatherCombiner because the module contains "
"all-gather with constrained layouts";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
auto key_fn = [&](const HloInstruction* instruction) {
return CombineKey(instruction, *domain_map, combine_by_dim_);
};
auto combine_fn =
[&](absl::Span<HloInstruction* const> to_combine) -> absl::Status {
return CombineAllGathers(to_combine, combine_by_dim_);
};
TF_ASSIGN_OR_RETURN(
bool computation_changed,
CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn,
combine_threshold_in_bytes_,
combine_threshold_count_));
changed |= computation_changed;
}
return changed;
}
} | #include "xla/service/all_gather_combiner.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ::testing::Matcher;
namespace op = xla::testing::opcode_matchers;
int64_t kMaxCombineCount = 256;
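// Returns every all-gather instruction in `module`, skipping fusion
// computations.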
std::vector<HloAllGatherInstruction*> FindAllGathers(const HloModule& module) {
std::vector<HloAllGatherInstruction*> results;
for (HloComputation* computation : module.computations()) {
if (computation->IsFusionComputation()) {
continue;
}
for (HloInstruction* hlo : computation->instructions()) {
if (auto it = DynCast<HloAllGatherInstruction>(hlo)) {
results.push_back(it);
}
}
}
return results;
}
int64_t AllGatherCount(const HloModule& module) {
return FindAllGathers(module).size();
}
using AllGatherCombinerTest = HloTestBase;
TEST_F(AllGatherCombinerTest, CombineAllGathers) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0)
param1 = f32[32] parameter(1)
allgather0 = f32[128] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[128] all-gather(param1), replica_groups={}, dimensions={0}
ROOT tuple = (f32[128], f32[128]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather =
op::AllGather(op::Parameter(0), op::Parameter(1));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::GetTupleElement(combined_all_gather, 1)));
}
TEST_F(AllGatherCombinerTest, CombineAllGathersByAllGatherDimension) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,2] parameter(0)
param1 = f32[2,2] parameter(1)
param2 = f32[2,2] parameter(2)
param3 = f32[2,2] parameter(3)
param4 = f32[2,2] parameter(4)
allgather0 = f32[8,2] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[8,2] all-gather(param1), replica_groups={}, dimensions={0}
allgather2 = f32[2,8] all-gather(param2), replica_groups={}, dimensions={1}
allgather3 = f32[2,8] all-gather(param3), replica_groups={}, dimensions={1}
allgather4 = f32[8,2] all-gather(param4), replica_groups={}, dimensions={0}
ROOT tuple = (f32[8,2], f32[8,2], f32[2,8], f32[2,8], f32[8,2])
tuple(allgather0, allgather1, allgather2, allgather3, allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
true);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather0 =
op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4));
Matcher<const HloInstruction*> combined_all_gather1 =
op::AllGather(op::Parameter(2), op::Parameter(3));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather0, 0),
op::GetTupleElement(combined_all_gather0, 1),
op::GetTupleElement(combined_all_gather1, 0),
op::GetTupleElement(combined_all_gather1, 1),
op::GetTupleElement(combined_all_gather0, 2)));
}
TEST_F(AllGatherCombinerTest, DoNotCombineOverThreshold) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[8] parameter(0)
param1 = f32[8] parameter(1)
allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0}
ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(255, kMaxCombineCount,
true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, CombineUpToThreshold) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[8] parameter(0)
param1 = f32[8] parameter(1)
allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0}
ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(256, kMaxCombineCount,
true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 1);
EXPECT_TRUE(changed);
}
TEST_F(AllGatherCombinerTest, NoDependentCombination) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param = f32[1] parameter(0)
allgather0 = f32[2] all-gather(param), replica_groups={}, dimensions={0}
ROOT allgather1 = f32[4] all-gather(allgather0), replica_groups={},
dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, NoDifferentReplicaGroupsCombination) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0)
param1 = f32[32] parameter(1)
allgather0 = f32[64] all-gather(param0), replica_groups={{0, 1}, {2, 3}},
dimensions={0}
allgather1 = f32[64] all-gather(param1), replica_groups={{0, 2}, {1, 3}},
dimensions={0}
ROOT tuple = (f32[64], f32[64]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, DomainPreventsCombining) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0), sharding={maximal device=0}
param1 = f32[32] parameter(1), sharding={maximal device=1}
allgather0 = f32[128] all-gather(param0),
replica_groups={}, dimensions={0}, sharding={maximal device=0}
allgather1 = f32[128] all-gather(param1),
replica_groups={}, dimensions={0}, sharding={maximal device=1}
domain0 = f32[128] domain(allgather0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}},
exit={maximal device=0}}
domain1 = f32[128] domain(allgather1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}},
exit={maximal device=1}}
ROOT tuple = (f32[128], f32[128]) tuple(domain0, domain1),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, CombineFromTwoDomainsWithSameMetadata) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0), sharding={maximal device=0}
param1 = f32[32] parameter(1), sharding={maximal device=1}
param2 = f32[32] parameter(2), sharding={maximal device=1}
allgather0 = f32[128] all-gather(param0),
replica_groups={}, dimensions={0}, sharding={maximal device=0}
allgather1 = f32[128] all-gather(param1),
replica_groups={}, dimensions={0}, sharding={maximal device=1}
allgather2 = f32[128] all-gather(param2),
replica_groups={}, dimensions={0}, sharding={maximal device=0}
domain0 = f32[128] domain(allgather0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
domain1 = f32[128] domain(allgather1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=1}}
domain2 = f32[128] domain(allgather2),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
ROOT tuple = (f32[128], f32[128], f32[128]) tuple(domain0, domain1,
domain2),
sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(/*combine_threshold_in_bytes=*/1024 * 1024,
                            kMaxCombineCount, /*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 3);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_TRUE(changed);
const HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
ASSERT_EQ(param0->user_count(), 1);
const HloInstruction* combined_ag = param0->users().front();
ASSERT_EQ(combined_ag->opcode(), HloOpcode::kAllGather);
EXPECT_THAT(combined_ag,
op::Sharding("{{maximal device=0}, {maximal device=0}}"));
}
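// With combine_by_dim=false, all-gathers over different dimensions are merged
// into one op; bitcasts adapt the operands and results whose gather dimension
// differs from the combined one.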
TEST_F(AllGatherCombinerTest, CombineAllGathersDifferentDims) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,3]{1,0} parameter(0)
param1 = f32[2,3]{0,1} parameter(1)
allgather0 = f32[8,3]{1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[2,12]{0,1} all-gather(param1), replica_groups={},
dimensions={1}
ROOT tuple = (f32[8,3]{1,0}, f32[2,12]{0,1}) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(/*combine_threshold_in_bytes=*/1024 * 1024,
                            kMaxCombineCount, /*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather =
op::AllGather(op::Parameter(0), op::Bitcast(op::Parameter(1)));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::Bitcast(op::GetTupleElement(combined_all_gather, 1))));
}
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDims) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7]{1,0} parameter(0)
param1 = f32[3,8]{1,0} parameter(1)
param2 = f32[4,9]{0,1} parameter(2)
param3 = f32[5,10]{0,1} parameter(3)
param4 = f32[6,11]{1,0} parameter(4)
allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={},
dimensions={0}
allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
dimensions={1}
allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
dimensions={1}
allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3,
allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(/*combine_threshold_in_bytes=*/1024 * 1024,
                            kMaxCombineCount, /*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)),
op::Bitcast(op::Parameter(3)), op::Parameter(4));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::GetTupleElement(combined_all_gather, 1),
op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
op::GetTupleElement(combined_all_gather, 4)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(1, all_gathers.size());
ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsRank4) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7,2,7]{3,2,1,0} parameter(0)
param1 = f32[3,8,3,8]{3,2,1,0} parameter(1)
param2 = f32[4,9,4,9]{3,0,1,2} parameter(2)
param3 = f32[5,10,5,10]{3,0,1,2} parameter(3)
param4 = f32[6,11,6,11]{3,2,1,0} parameter(4)
allgather0 = f32[8,7,2,7]{3,2,1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[12,8,3,8]{3,2,1,0} all-gather(param1), replica_groups={},
dimensions={0}
allgather2 = f32[4,9,16,9]{3,0,1,2} all-gather(param2), replica_groups={},
dimensions={2}
allgather3 = f32[5,10,20,10]{3,0,1,2} all-gather(param3), replica_groups={},
dimensions={2}
allgather4 = f32[24,11,6,11]{3,2,1,0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[8,7,2,7]{3,2,1,0}, f32[12,8,3,8]{3,2,1,0},
f32[4,9,16,9]{3,0,1,2}, f32[5,10,20,10]{3,0,1,2},
f32[24,11,6,11]{3,2,1,0}) tuple(allgather0, allgather1, allgather2,
allgather3, allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(/*combine_threshold_in_bytes=*/1024 * 1024,
                            kMaxCombineCount, /*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)),
op::Bitcast(op::Parameter(3)), op::Parameter(4));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::GetTupleElement(combined_all_gather, 1),
op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
op::GetTupleElement(combined_all_gather, 4)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(1, all_gathers.size());
ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsMixedRanks) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7]{1,0} parameter(0)
param1 = f32[3,8]{1,0} parameter(1)
param2 = f32[4,9]{0,1} parameter(2)
param3 = f32[5,10]{0,1} parameter(3)
param4 = f32[6]{0} parameter(4)
allgather0 = f32[2,28]{1,0} all-gather(param0), replica_groups={},
dimensions={1}
allgather1 = f32[3,32]{1,0} all-gather(param1), replica_groups={},
dimensions={1}
allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
dimensions={1}
allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
dimensions={1}
allgather4 = f32[24]{0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[2,28]{1,0}, f32[3,32]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
f32[24]{0}) tuple(allgather0, allgather1, allgather2, allgather3,
allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(/*combine_threshold_in_bytes=*/1024 * 1024,
                            kMaxCombineCount, /*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
op::Bitcast(op::Parameter(0)), op::Bitcast(op::Parameter(1)),
op::Bitcast(op::Parameter(2)), op::Bitcast(op::Parameter(3)),
op::Parameter(4));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::Bitcast(op::GetTupleElement(combined_all_gather, 0)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 1)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
op::GetTupleElement(combined_all_gather, 4)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(1, all_gathers.size());
ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
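// With combine_by_dim=true, all-gathers are only grouped with others that
// gather along the same dimension, producing one combined op per dimension.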
TEST_F(AllGatherCombinerTest, CombineAllGathersByDim) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7]{1,0} parameter(0)
param1 = f32[3,8]{1,0} parameter(1)
param2 = f32[4,9]{0,1} parameter(2)
param3 = f32[5,10]{0,1} parameter(3)
param4 = f32[6,11]{1,0} parameter(4)
allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={},
dimensions={0}
allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
dimensions={1}
allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
dimensions={1}
allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3,
allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(/*combine_threshold_in_bytes=*/1024 * 1024,
                            kMaxCombineCount, /*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather_0 =
op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4));
Matcher<const HloInstruction*> combined_all_gather_1 =
op::AllGather(op::Parameter(2), op::Parameter(3));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather_0, 0),
op::GetTupleElement(combined_all_gather_0, 1),
op::GetTupleElement(combined_all_gather_1, 0),
op::GetTupleElement(combined_all_gather_1, 1),
op::GetTupleElement(combined_all_gather_0, 2)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(2, all_gathers.size());
ASSERT_EQ(0, all_gathers[0]->all_gather_dimension());
ASSERT_EQ(1, all_gathers[1]->all_gather_dimension());
}
}
} |
1,841 | cpp | tensorflow/tensorflow | collective_pipeliner | third_party/xla/xla/service/collective_pipeliner.cc | third_party/xla/xla/service/collective_pipeliner_test.cc | #ifndef XLA_SERVICE_COLLECTIVE_PIPELINER_H_
#define XLA_SERVICE_COLLECTIVE_PIPELINER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
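// HLO pass that pipelines collectives appearing inside while loops. One
// iteration's worth of a selected collective (plus the formatting ops around
// it) is peeled out of the loop body and moved after the loop (forward
// pipelining) or before it (backward pipelining), so the collective's
// communication can overlap with the surrounding iterations' compute.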
class CollectivePipeliner : public HloModulePass {
public:
enum PipeliningDirection {
kBackward,
kForward,
kForwardSink,
};
using HloPostprocessor =
std::optional<std::function<absl::Status(HloInstruction* instr)>>;
struct Config {
int64_t level_to_operate_on = 0;
int64_t max_pipelining_per_loop = 0;
bool last_run = true;
bool pipeline_use_tree = false;
bool process_different_sized_ops = false;
PipeliningDirection pipelining_direction = PipeliningDirection::kForward;
HloPredicate should_process;
HloPredicate acceptable_formatting;
HloPredicate reuse_pipelined_op_buffer;
HloPredicate should_allow_loop_variant_parameter_in_chain =
HloPredicateFalse;
bool should_allow_control_dependencies = false;
HloPostprocessor postprocess_backward_peeled_op = std::nullopt;
HloPostprocessor postprocess_backward_rotated_op = std::nullopt;
bool should_add_loop_invariant_op_in_chain = false;
};
static const char* const kInsertedByPreviousStep;
static const char* const kSunkByPreviousStep;
explicit CollectivePipeliner(const Config& config) : config_(config) {}
CollectivePipeliner(CollectivePipeliner&& other) = default;
CollectivePipeliner& operator=(CollectivePipeliner&& other) = default;
absl::string_view GetPipelineDirectionString(PipeliningDirection direction) {
switch (direction) {
case PipeliningDirection::kForward: {
return "forward";
}
case PipeliningDirection::kBackward: {
return "backward";
}
case PipeliningDirection::kForwardSink: {
return "forwardsink";
}
}
}
absl::string_view name() const override {
if (config_.pipelining_direction == kForward) {
return "collective-pipeliner-forward";
} else if (config_.pipelining_direction == kBackward) {
return "collective-pipeliner-backward";
} else {
return "collective-pipeliner-forwardsink";
}
}
absl::StatusOr<bool> RunPipeliner(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const Config config_;
};
}
#endif
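// Illustrative sketch added by the editor (not part of the original source):
// one plausible way to configure and run this pass over an HloModule. The
// particular field values below are assumptions chosen for the example only.
//
//   CollectivePipeliner::Config config;
//   config.last_run = true;
//   config.pipelining_direction =
//       CollectivePipeliner::PipeliningDirection::kForward;
//   config.should_process = HloPredicateIsOp<HloOpcode::kAllReduce>;
//   config.acceptable_formatting = HloPredicateTrue;
//   config.reuse_pipelined_op_buffer = HloPredicateTrue;
//   CollectivePipeliner pipeliner(config);
//   TF_ASSIGN_OR_RETURN(bool changed,
//                       pipeliner.Run(module, /*execution_threads=*/{}));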
#include "xla/service/collective_pipeliner.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/numeric/int128.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instruction_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/primitive_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/constant_value.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/value_range.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
const char* const CollectivePipeliner::kInsertedByPreviousStep =
"InsertedByPreviousStep";
const char* const CollectivePipeliner::kSunkByPreviousStep =
"SunkByPreviousStep";
namespace {
using InstructionMap =
absl::flat_hash_map<const HloInstruction*, HloInstruction*>;
using LoopVariantParameterInfo =
std::vector<std::pair<int64_t, HloInstruction*>>;
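// Copies the control predecessors of `original` onto `new_instr`, remapping
// each predecessor through `cloned_map`; predecessors without a clone are
// skipped.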
absl::Status UpdateControlDependencies(HloInstruction* original,
HloInstruction* new_instr,
const InstructionMap& cloned_map) {
for (auto* pred : original->control_predecessors()) {
auto it = cloned_map.find(pred);
if (it == cloned_map.end()) {
continue;
}
TF_RETURN_IF_ERROR(it->second->AddControlDependencyTo(new_instr));
}
return absl::OkStatus();
}
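// Returns true iff the index operand at position `index` is non-constant and
// every other index operand of the dynamic-update-slice is constant.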
bool AllIndicesConstantsExceptOne(
const HloDynamicUpdateSliceInstruction* dyn_update, int64_t index) {
if (dyn_update->operand(index)->IsConstant()) {
return false;
}
for (int64_t i = dyn_update->first_index_operand_number();
i < dyn_update->operand_count(); ++i) {
if (i == index) {
continue;
}
if (!dyn_update->operand(i)->IsConstant()) {
return false;
}
}
return true;
}
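// Returns the single dimension of the dynamic-update-slice whose index operand
// is non-constant, provided every other index is the constant 0; nullopt
// otherwise.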
std::optional<int> GetSlicedDimension(
const HloDynamicUpdateSliceInstruction* dyn_update) {
std::optional<int> sliced_dim;
for (int64_t i = dyn_update->first_index_operand_number();
i < dyn_update->operand_count(); ++i) {
const HloInstruction* idx = dyn_update->operand(i);
if (!idx->IsConstant()) {
if (sliced_dim.has_value()) {
return std::nullopt;
}
sliced_dim = i - dyn_update->first_index_operand_number();
continue;
}
if (Cast<HloConstantInstruction>(idx)->literal().GetFirstInteger() != 0) {
return std::nullopt;
}
}
return sliced_dim;
}
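// Checks that `index` is a (non-empty) linear function of the loop induction
// variable, using the symbolic range analysis from value_range.h.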
bool CheckIndexIsMonotonic(
const HloInstruction* index,
const absl::flat_hash_map<const HloInstruction*, Range>& induction_map) {
Range range = RecursivelyIdentifyRange(index, induction_map);
VLOG(5) << "Range for: " << index->ToString() << " " << range.ToString();
return !range.IsEmpty() && range.IsLinear();
}
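// Checks that the loop-carried buffer `gte` is used only by `dus` and that it
// is sliced at the same index (`dus_idx`) in the pipelined dimension
// `sliced_index`.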
bool CheckParameterUsageIsCompatible(const HloInstruction* gte,
const HloInstruction* dus,
const HloInstruction* dus_idx,
int64_t sliced_index) {
for (auto* user : gte->users()) {
if (dus != user) {
VLOG(5) << "CheckParameterUsageIsCompatible(): User not a dynamic slice "
"or the dynamic-update-slice for the output."
<< user->ToString();
return false;
}
if (user->operand(static_cast<HloDynamicSliceInstruction*>(user)
->first_index_operand_number() +
sliced_index) != dus_idx) {
VLOG(5) << "CheckParameterUsageIsCompatible(): Idx is not the same as "
"dynamic-update-slice() "
<< user->ToString();
return false;
}
}
return true;
}
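// If `instr` is the marker custom-call inserted by a previous pipelining step,
// returns the pipelining level stored in its second operand; nullopt otherwise.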
std::optional<int64_t> GetLevelFromCustomCall(const HloInstruction* instr) {
if (!instr->IsCustomCall(CollectivePipeliner::kInsertedByPreviousStep)) {
return std::nullopt;
}
return Cast<HloConstantInstruction>(instr->operand(1))
->literal()
.GetFirstInteger();
}
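// Collects the transitive producers of a dynamic-slice's index operands.
// Returns nullopt if any producer is a parameter or has side effects, i.e. the
// indices are not compile-time-computable expressions.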
std::optional<std::vector<HloInstruction*>>
CollectDynamicSliceIndicesIfConstant(HloInstruction* instr) {
CHECK_EQ(instr->opcode(), HloOpcode::kDynamicSlice);
std::vector<HloInstruction*> indices;
HloDynamicSliceInstruction* dyn_slice =
Cast<HloDynamicSliceInstruction>(instr);
for (int64_t i = dyn_slice->first_index_operand_number();
i < instr->operand_count(); ++i) {
HloInstruction* operand = dyn_slice->mutable_operand(i);
CHECK_EQ(operand->shape().dimensions_size(), 0);
std::vector<std::pair<HloInstruction*, int>> stack(
1, std::make_pair(operand, 0));
absl::flat_hash_set<HloInstruction*> visited;
while (!stack.empty()) {
auto& [curr_instr, operand_idx] = stack.back();
if (operand_idx == curr_instr->operand_count()) {
indices.push_back(curr_instr);
stack.pop_back();
continue;
}
HloInstruction* next_operand = curr_instr->mutable_operand(operand_idx++);
if (next_operand->opcode() == HloOpcode::kParameter ||
next_operand->HasSideEffect()) {
return std::nullopt;
}
if (visited.insert(next_operand).second) {
stack.push_back(std::make_pair(next_operand, 0));
}
}
}
return indices;
}
bool IsSupportedLoopIndexType(PrimitiveType type) {
switch (type) {
case PrimitiveType::S32:
case PrimitiveType::S64:
case PrimitiveType::S16:
case PrimitiveType::S8:
case PrimitiveType::U32:
case PrimitiveType::U64:
case PrimitiveType::U16:
case PrimitiveType::U8:
return true;
default:
return false;
}
}
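// Creates a scalar literal of `shape`'s (integral) element type holding
// `value`; nullopt for non-integral element types.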
std::optional<Literal> CreateLiteralOfShape(const Shape& shape, int64_t value) {
return primitive_util::PrimitiveTypeSwitch<std::optional<Literal>>(
[&](auto kType) -> std::optional<Literal> {
if constexpr (primitive_util::IsIntegralType(kType)) {
using NativeT = typename primitive_util::NativeTypeOf<kType>;
CHECK_LE(value, static_cast<absl::int128>(
std::numeric_limits<NativeT>::max()));
CHECK_GE(value, static_cast<absl::int128>(
std::numeric_limits<NativeT>::min()));
return LiteralUtil::CreateR0(static_cast<NativeT>(value));
}
return std::nullopt;
},
shape.element_type());
}
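// Collects trivially movable dependencies of `i` (constant dynamic-slice
// indices, broadcasts and the constants they broadcast) so they can be cloned
// together with it.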
bool CollectSimpleDependencies(HloInstruction* i,
std::vector<HloInstruction*>& deps_vector,
absl::flat_hash_set<HloInstruction*>& deps_set) {
if (i->opcode() == HloOpcode::kDynamicSlice) {
auto indices = CollectDynamicSliceIndicesIfConstant(i);
if (!indices.has_value()) {
return false;
}
deps_vector.insert(deps_vector.end(), indices->begin(), indices->end());
deps_set.insert(indices->begin(), indices->end());
return true;
}
for (HloInstruction* op : i->mutable_operands()) {
absl::InlinedVector<HloInstruction*, 4> to_add;
if (op->opcode() == HloOpcode::kBroadcast) {
to_add.push_back(op);
if (deps_set.insert(op).second) {
op = op->mutable_operand(0);
if (op->opcode() == HloOpcode::kConstant) {
if (deps_set.insert(op).second) {
to_add.push_back(op);
}
}
}
}
deps_vector.insert(deps_vector.end(), to_add.rbegin(), to_add.rend());
}
return true;
}
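// Starting from `instr` (the op to pipeline), walks its users through
// acceptable "formatting" ops until the dynamic-update-slice that stores the
// result into the loop output. Returns that dynamic-update-slice together with
// the formatting ops in between, or {nullptr, {}} if the use pattern is not
// supported.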
std::pair<HloDynamicUpdateSliceInstruction*, std::vector<HloInstruction*>>
CheckStoreIntoSliceIsCompatible(HloInstruction* instr,
const HloComputation* while_body,
int64_t level_to_operate_on,
bool multi_uses_pipelining,
HloPredicate acceptable_formatting) {
if ((!multi_uses_pipelining && instr->user_count() != 1) ||
instr->operand_count() != 1 || instr->HasControlDependencies()) {
return std::make_pair(nullptr, std::vector<HloInstruction*>{});
}
absl::flat_hash_set<HloInstruction*> added_instructions;
HloInstruction* folded_instr = instr;
std::vector<HloInstruction*> formatting_ops;
auto is_acceptable_user = [&](HloInstruction* i) {
if (i->HasControlDependencies() || !acceptable_formatting(i)) {
return false;
}
if (i->opcode() == HloOpcode::kReduce &&
(ShapeUtil::ElementsIn(i->shape()) ==
ShapeUtil::ElementsIn(instr->operand(0)->shape()) ||
ShapeUtil::ElementsIn(instr->operand(0)->shape()) < 1024)) {
return true;
}
return HloPredicateIsOp<HloOpcode::kSlice, HloOpcode::kDynamicSlice,
HloOpcode::kPad, HloOpcode::kCollectivePermute,
HloOpcode::kConvert, HloOpcode::kReshape,
HloOpcode::kAllReduce, HloOpcode::kTranspose,
HloOpcode::kBroadcast>(i) ||
(multi_uses_pipelining && i->IsElementwise()) ||
i->IsCustomCall(CollectivePipeliner::kInsertedByPreviousStep);
};
auto is_final_slice_insertion = [&](HloInstruction* i) {
HloDynamicUpdateSliceInstruction* dyn_update =
DynCast<HloDynamicUpdateSliceInstruction>(i);
if (dyn_update == nullptr || dyn_update->user_count() != 1) {
return false;
}
if (level_to_operate_on == 0) {
if (dyn_update->users()[0] == while_body->root_instruction()) {
return true;
}
return false;
}
for (int64_t i = dyn_update->first_index_operand_number();
i < dyn_update->operand_count(); ++i) {
if (auto level = GetLevelFromCustomCall(dyn_update->operand(i))) {
if (*level == level_to_operate_on) {
return true;
}
return false;
}
}
return false;
};
HloDynamicUpdateSliceInstruction* final_slice_insertion = nullptr;
std::vector<std::pair<HloInstruction*, int>> stack;
absl::flat_hash_map<HloInstruction*, int32_t> formatting_map;
stack.push_back(std::make_pair(folded_instr, 0));
while (!stack.empty()) {
auto& data = stack.back();
HloInstruction* instr = data.first;
if (data.second == 0 && instr != folded_instr) {
formatting_map[instr] = 0;
}
if (data.second == instr->user_count()) {
stack.pop_back();
continue;
}
HloInstruction* next_user = instr->users()[data.second++];
if (is_final_slice_insertion(next_user)) {
if ((final_slice_insertion != nullptr &&
final_slice_insertion != next_user) ||
next_user->user_count() != 1 || next_user->operand(1) != instr) {
return std::make_pair(nullptr, std::vector<HloInstruction*>{});
}
final_slice_insertion = Cast<HloDynamicUpdateSliceInstruction>(next_user);
continue;
}
if (!is_acceptable_user(next_user)) {
return std::make_pair(nullptr, std::vector<HloInstruction*>{});
}
if (added_instructions.insert(next_user).second) {
stack.push_back(std::make_pair(next_user, 0));
}
}
if (final_slice_insertion == nullptr) {
return std::make_pair(nullptr, std::vector<HloInstruction*>{});
}
for (auto& op : formatting_map) {
for (const HloInstruction* operand : final_slice_insertion->operands()) {
if (formatting_map.count(operand)) {
++op.second;
}
}
}
stack.push_back(std::make_pair(folded_instr, 0));
added_instructions.clear();
while (!stack.empty()) {
auto& data = stack.back();
HloInstruction* instr = data.first;
if (data.second == 0 && instr != folded_instr) {
if (!CollectSimpleDependencies(instr, formatting_ops,
added_instructions)) {
return std::make_pair(nullptr, std::vector<HloInstruction*>{});
}
formatting_ops.push_back(instr);
}
if (data.second == instr->user_count()) {
stack.pop_back();
continue;
}
HloInstruction* next_user = instr->users()[data.second++];
if (is_final_slice_insertion(next_user)) {
if ((final_slice_insertion != nullptr &&
final_slice_insertion != next_user) ||
next_user->user_count() != 1 || next_user->operand(1) != instr) {
return std::make_pair(nullptr, std::vector<HloInstruction*>{});
}
final_slice_insertion = Cast<HloDynamicUpdateSliceInstruction>(next_user);
continue;
}
if (--formatting_map[next_user] > 0) {
continue;
}
if (added_instructions.insert(next_user).second) {
stack.push_back(std::make_pair(next_user, 0));
}
}
return std::make_pair(final_slice_insertion, formatting_ops);
}
bool IsLoopIterator(const HloInstruction* instr,
int64_t loop_iteration_tuple_idx) {
if (instr->opcode() != HloOpcode::kGetTupleElement ||
instr->operand(0)->opcode() != HloOpcode::kParameter) {
return false;
}
return instr->tuple_index() == loop_iteration_tuple_idx;
}
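// Returns the operands of the formatting ops that are outside the pipelined
// group (neither the source op nor another formatting op); these values must
// remain available where the group is re-created.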
std::vector<HloInstruction*> CollectDependenciesToPipeline(
HloInstruction* source_op, absl::Span<HloInstruction* const> ops) {
absl::flat_hash_set<HloInstruction*> formatting_set(ops.begin(), ops.end());
formatting_set.insert(source_op);
std::vector<HloInstruction*> to_return;
absl::flat_hash_set<HloInstruction*> already_inserted;
for (const HloInstruction* op : ops) {
for (HloInstruction* operand : op->operands()) {
if (!formatting_set.count(operand)) {
formatting_set.insert(operand);
to_return.push_back(operand);
}
}
}
return to_return;
}
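// Collects the transitive producers of `instr` that would have to move with it
// when pipelining backwards. Returns nullopt if the chain depends on the loop
// iterator, on a disallowed loop-variant parameter, or on ops (e.g. recv-done)
// that cannot be safely re-created outside the loop.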
std::optional<std::vector<HloInstruction*>> CollectIndependentOperandChain(
HloInstruction* instr, int64_t loop_iter,
const absl::flat_hash_set<const HloInstruction*>& loop_invariant_params,
HloPredicate should_allow_loop_variant_parameter_in_chain,
const absl::flat_hash_set<const HloInstruction*>&
loop_invariant_instructions,
bool should_add_loop_invariant_op_in_chain) {
std::vector<HloInstruction*> chain;
absl::flat_hash_set<const HloInstruction*> visited_set({instr});
std::vector<std::pair<HloInstruction*, int>> stack(1, {instr, 0});
auto is_loop_variant_parameter_input =
[&loop_invariant_params, loop_iter](const HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kGetTupleElement ||
instr->operand(0)->opcode() != HloOpcode::kParameter) {
return false;
}
return !IsLoopIterator(instr, loop_iter) &&
!loop_invariant_params.count(instr);
};
while (!stack.empty()) {
auto& curr = stack.back();
if (curr.second == curr.first->operand_count()) {
if (curr.first != instr) {
chain.push_back(curr.first);
}
stack.pop_back();
continue;
}
HloInstruction* curr_operand = curr.first->mutable_operand(curr.second++);
if (curr_operand->opcode() == HloOpcode::kParameter) {
continue;
}
if (is_loop_variant_parameter_input(curr_operand) &&
!should_allow_loop_variant_parameter_in_chain(curr_operand)) {
return std::nullopt;
}
if (visited_set.insert(curr_operand).second) {
stack.emplace_back(curr_operand, 0);
}
}
for (auto* chain_instr : chain) {
if (chain_instr->opcode() == HloOpcode::kAfterAll) {
continue;
}
if (chain_instr->opcode() == HloOpcode::kRecvDone) {
return std::nullopt;
}
const bool all_users_in_chain = absl::c_all_of(
chain_instr->users(), [&visited_set](const HloInstruction* u) {
return visited_set.contains(u);
});
const bool is_scalar_shaped =
ShapeUtil::IsEffectiveScalar(chain_instr->shape());
if (!all_users_in_chain) {
bool allow_loop_variant_parameter_in_chain =
(chain_instr->opcode() != HloOpcode::kGetTupleElement ||
chain_instr->operand(0)->opcode() != HloOpcode::kParameter ||
!should_allow_loop_variant_parameter_in_chain(chain_instr));
bool add_loop_invariant_op_in_chain =
(should_add_loop_invariant_op_in_chain &&
loop_invariant_instructions.contains(chain_instr));
if ((!loop_invariant_params.contains(chain_instr) && !is_scalar_shaped &&
allow_loop_variant_parameter_in_chain) &&
!add_loop_invariant_op_in_chain) {
return std::nullopt;
}
}
}
return std::move(chain);
}
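// Entry point for backward pipelining: bails out on control dependencies
// (unless explicitly allowed) and otherwise delegates to
// CollectIndependentOperandChain().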
std::optional<std::vector<HloInstruction*>> CollectChainsToPushBackwards(
HloInstruction* instr, int64_t loop_iter, const HloComputation* while_body,
int64_t level_to_operate_on,
const absl::flat_hash_set<const HloInstruction*>& loop_invariant_params,
HloPredicate should_allow_loop_variant_parameter_in_chain,
bool should_allow_control_dependencies,
const absl::flat_hash_set<const HloInstruction*>&
loop_invariant_instructions,
bool should_add_loop_invariant_op_in_chain) {
if (instr->HasControlDependencies() && !should_allow_control_dependencies) {
return std::nullopt;
}
return CollectIndependentOperandChain(
instr, loop_iter, loop_invariant_params,
should_allow_loop_variant_parameter_in_chain, loop_invariant_instructions,
should_add_loop_invariant_op_in_chain);
}
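// Follows the chain of single-use dynamic-update-slices starting at `dus` down
// to the while-body root and returns the root tuple index it feeds, if unique.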
std::optional<int64_t> FindOutputIndexForDynamicUpdateSlice(
const HloInstruction* dus, const HloInstruction* root_instr) {
std::optional<int64_t> output_idx;
while (dus->opcode() == HloOpcode::kDynamicUpdateSlice) {
if (dus->user_count() != 1) {
output_idx = std::nullopt;
break;
}
if (dus->users()[0] == root_instr) {
auto indices = root_instr->OperandIndices(dus);
if (indices.size() != 1) {
output_idx = std::nullopt;
break;
}
output_idx = indices[0];
break;
}
dus = Cast<HloDynamicUpdateSliceInstruction>(dus->users()[0]);
}
return output_idx;
}
std::vector<HloInstruction*> MapNewOperands(
absl::Span<HloInstruction* const> operands, const InstructionMap& clone_map,
bool allow_unmapped = false) {
std::vector<HloInstruction*> new_operands;
new_operands.reserve(operands.size());
for (HloInstruction* operand : operands) {
auto it = clone_map.find(operand);
HloInstruction* mapped_operand = operand;
CHECK(it != clone_map.end() || allow_unmapped)
<< operand->ToString() << " not present in map";
if (it != clone_map.end()) {
mapped_operand = it->second;
}
new_operands.push_back(mapped_operand);
}
return new_operands;
}
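// Bookkeeping for one collective selected for pipelining: the collective
// itself, the dynamic-update-slice storing its result, the formatting ops in
// between, the sliced dimension, and the loop-output tuple index.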
struct WhileMoveInfo {
HloInstruction* collective_to_move;
HloDynamicUpdateSliceInstruction* dynamic_update_slice;
std::vector<HloInstruction*> formatting_ops;
int64_t sliced_idx;
int64_t output_idx;
};
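// Fixes channel ids on cloned instructions: non-host send/recv keep their id,
// *-done ops copy the id of the send/recv they consume, and any other channel
// instruction receives a fresh id from `next_channel_id`.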
void UpdateInstructionChannelId(HloInstruction* cloned_instr,
int64_t& next_channel_id) {
if (const auto* send_recv_instr =
DynCast<HloSendRecvInstruction>(cloned_instr)) {
if (!send_recv_instr->is_host_transfer()) {
return;
}
}
if (auto* channel_instr = DynCast<HloChannelInstruction>(cloned_instr)) {
if (channel_instr->opcode() == HloOpcode::kSendDone ||
channel_instr->opcode() == HloOpcode::kRecvDone) {
auto* operand = channel_instr->operand(0);
CHECK(operand->opcode() == HloOpcode::kSend ||
operand->opcode() == HloOpcode::kRecv);
channel_instr->set_channel_id(
Cast<HloChannelInstruction>(operand)->channel_id());
return;
}
if (channel_instr->channel_id()) {
channel_instr->set_channel_id(next_channel_id++);
}
}
}
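// Clones the backward chain (formatting ops followed by the collective) into
// `target_computation`, remapping operands through `clone_map`, refreshing
// channel ids, and recording loop-variant parameter GTEs in
// `loop_variant_parameter_info`; returns the last instruction cloned.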
template <typename Comp>
absl::StatusOr<HloInstruction*> CloneBackwardChain(
Comp& target_computation, const WhileMoveInfo& move_info,
InstructionMap& clone_map, int64_t loop_iter_idx, int64_t& next_channel_id,
LoopVariantParameterInfo* loop_variant_parameter_info = nullptr) {
std::vector<HloInstruction*> to_clone(move_info.formatting_ops.begin(),
move_info.formatting_ops.end());
to_clone.push_back(move_info.collective_to_move);
HloInstruction* last_cloned = nullptr;
for (auto* chain_op : to_clone) {
if (IsLoopIterator(chain_op, loop_iter_idx) ||
clone_map.contains(chain_op)) {
continue;
}
auto new_operands = MapNewOperands(chain_op->operands(), clone_map);
HloInstruction* cloned = target_computation.AddInstruction(
chain_op->CloneWithNewOperands(chain_op->shape(), new_operands));
TF_RETURN_IF_ERROR(UpdateControlDependencies(chain_op, cloned, clone_map));
UpdateInstructionChannelId(cloned, next_channel_id);
clone_map[chain_op] = cloned;
last_cloned = cloned;
if (loop_variant_parameter_info != nullptr &&
chain_op->opcode() == HloOpcode::kGetTupleElement &&
chain_op->operand(0)->opcode() == HloOpcode::kParameter &&
chain_op->tuple_index() != loop_iter_idx) {
loop_variant_parameter_info->push_back(
std::make_pair(chain_op->tuple_index(), cloned));
}
}
CHECK_NE(last_cloned, nullptr);
return last_cloned;
} | #include "xla/service/collective_pipeliner.h"
#include <functional>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace {
using ::testing::_;
namespace op = xla::testing::opcode_matchers;
class CollectivePipelinerTest : public HloTestBase {
public:
CollectivePipelinerTest() {
const int64_t kNumReplicas = 4;
const int64_t kNumComputations = 2;
    config_ = GetModuleConfigForTest(/*replica_count=*/kNumReplicas,
                                     /*num_partitions=*/kNumComputations);
}
protected:
const HloPredicate IsAllGather = HloPredicateIsOp<HloOpcode::kAllGather>;
HloModuleConfig config_;
};
absl::StatusOr<bool> RunOptimizer(
HloModule* module, bool last_run, int64_t level_to_operate_on = 0,
bool pipeline_use_tree = false, bool process_different_sized_ops = true,
CollectivePipeliner::PipeliningDirection direction =
CollectivePipeliner::PipeliningDirection::kForward,
HloPredicate should_process = HloPredicateIsOp<HloOpcode::kAllReduce>,
HloPredicate acceptable_formatting = HloPredicateTrue,
HloPredicate reuse_pipelined_op_buffer = HloPredicateTrue,
HloPredicate should_allow_loop_variant_parameter_in_chain =
HloPredicateFalse,
CollectivePipeliner::HloPostprocessor postprocess_backward_peeled =
std::nullopt,
CollectivePipeliner::HloPostprocessor postprocess_backward_rotated =
std::nullopt,
bool should_add_loop_invariant_op_in_chain = false) {
  CollectivePipeliner::Config config = {
      /*level_to_operate_on=*/level_to_operate_on,
      /*max_pipelining_per_loop=*/INT64_MAX,
      /*last_run=*/last_run,
      /*pipeline_use_tree=*/pipeline_use_tree,
      /*process_different_sized_ops=*/process_different_sized_ops,
      /*pipelining_direction=*/direction,
      /*should_process=*/should_process,
      /*acceptable_formatting=*/acceptable_formatting,
      /*reuse_pipelined_op_buffer=*/reuse_pipelined_op_buffer,
      /*should_allow_loop_variant_parameter_in_chain=*/
      should_allow_loop_variant_parameter_in_chain,
      /*should_allow_control_dependencies=*/false,
      /*postprocess_backward_peeled_op=*/postprocess_backward_peeled,
      /*postprocess_backward_rotated_op=*/postprocess_backward_rotated,
      /*should_add_loop_invariant_op_in_chain=*/
      should_add_loop_invariant_op_in_chain};
HloPassPipeline pass("optimizer");
  pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                            /*allow_mixed_precision=*/false);
pass.AddPass<CollectivePipeliner>(config);
  pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                            /*allow_mixed_precision=*/false);
return pass.Run(module);
}
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOne) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
const HloInstruction* sliced = root->operand(1)->operand(0);
EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* index = sliced->operand(1);
EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(index->tuple_index(), 3);
const HloInstruction* while_inst = index->operand(0);
EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
const HloInstruction* while_root =
while_inst->while_body()->root_instruction();
EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
const HloInstruction* dyn_upd = while_root->operand(1);
EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* prev_ar = dyn_upd2->operand(1);
EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
const HloInstruction* dyn_slice_top = prev_ar->operand(0);
EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
const HloInstruction* get_tuple_index = dyn_slice_top->operand(1);
EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_value->tuple_index(), 1);
EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneCollectivePermute) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.5), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,0}},
frontend_attributes={_xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9},{4,10},{5,11},{6,12},{7,13}}"}
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_num_partitions(8);
config_.set_replica_count(1);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneCollectivePermuteBackwardCycle) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.5), channel_id=1, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}},
frontend_attributes={_xla_send_recv_validation="{{7,13},{6,12},{5,11},{4,10},{3,9},{2,8},{1,7},{0,6}}"}
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_num_partitions(8);
config_.set_replica_count(1);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(CollectivePipelinerTest, UpdateSendRecvChannelIdForHostTransfers) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
after-all = after-all()
send.88 = (s32[], u32[], token[]) send(
add.232, after-all), channel_id=2, is_host_transfer=true
send-done.88 = token[] send-done(send.88), channel_id=2, is_host_transfer=true
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
auto* entry_comp = module->entry_computation();
auto* unrolled_send_done = entry_comp->GetInstructionWithName("send-done.0");
ASSERT_THAT(unrolled_send_done, ::testing::NotNull());
auto* unrolled_send = unrolled_send_done->operand(0);
auto channel_id = [](const HloInstruction* instr) {
return DynCast<HloChannelInstruction>(instr)->channel_id();
};
EXPECT_EQ(channel_id(unrolled_send), channel_id(unrolled_send_done));
}
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNoReuse) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(
                  module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
                  /*pipeline_use_tree=*/false,
                  /*process_different_sized_ops=*/true,
                  CollectivePipeliner::PipeliningDirection::kForward,
                  HloPredicateIsOp<HloOpcode::kAllReduce>,
                  /*acceptable_formatting=*/
                  [](const HloInstruction* i) { return true; },
                  /*reuse_pipelined_op_buffer=*/
                  [](const HloInstruction* i) { return false; })
.value());
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
EXPECT_EQ(while_instr->shape().tuple_shapes_size(), 5);
}
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNotFirstIdx) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[8,3,128], bf16[8,3,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[8,3,128], bf16[8,3,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[8,3,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[8,3,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[8,1,128] dynamic-slice(get-tuple-element.5, constant.2561, select.1348, constant.2561), dynamic_slice_sizes={8,1,128}
mul = bf16[8,1,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[8,1,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[8,3,128] dynamic-update-slice(get-tuple-element.395, ar.1, constant.2561, select.1348, constant.2561)
ROOT tuple = (s32[], bf16[8,3,128], bf16[8,3,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[8,3,128] parameter(0)
tuple = (s32[], bf16[8,3,128], bf16[8,3,128]) tuple(c0, p0, p0)
while = (s32[], bf16[8,3,128], bf16[8,3,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[8,3,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
const HloInstruction* sliced = root->operand(1)->operand(0);
EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* index = sliced->operand(2);
EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(index->tuple_index(), 3);
const HloInstruction* while_inst = index->operand(0);
EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
const HloInstruction* while_root =
while_inst->while_body()->root_instruction();
EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
const HloInstruction* dyn_upd = while_root->operand(1);
EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* prev_ar = dyn_upd2->operand(1);
EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
const HloInstruction* dyn_slice_top = prev_ar->operand(0);
EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
const HloInstruction* get_tuple_index = dyn_slice_top->operand(2);
EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_value->tuple_index(), 1);
EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
TEST_F(CollectivePipelinerTest, TransformIncrementByTwo) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(2)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
const HloInstruction* sliced = root->operand(1)->operand(0);
EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* index = sliced->operand(1);
EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(index->tuple_index(), 3);
const HloInstruction* while_inst = index->operand(0);
EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
const HloInstruction* while_root =
while_inst->while_body()->root_instruction();
EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
const HloInstruction* dyn_upd = while_root->operand(1);
EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* prev_ar = dyn_upd2->operand(1);
EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
const HloInstruction* dyn_slice_top = prev_ar->operand(0);
EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
const HloInstruction* get_tuple_index = dyn_slice_top->operand(1);
EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_value->tuple_index(), 1);
EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
TEST_F(CollectivePipelinerTest, NoTransformCantProveIndexDoesntWrap) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(4)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-1)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_FALSE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
}
TEST_F(CollectivePipelinerTest, TransformNegativeIndexIterationToZero) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-sli |
1,842 | cpp | tensorflow/tensorflow | change_op_data_type | third_party/xla/xla/service/change_op_data_type.cc | third_party/xla/xla/service/change_op_data_type_test.cc | #ifndef XLA_SERVICE_CHANGE_OP_DATA_TYPE_H_
#define XLA_SERVICE_CHANGE_OP_DATA_TYPE_H_
#include <functional>
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
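// HLO pass that changes the element type of instructions accepted by
// `op_matcher_`: operands are converted to the mapped target type, the
// instruction is cloned with the new shape, and the result is converted back
// to the original element type.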
class ChangeOpDataType : public HloModulePass {
public:
using HloCloner = std::function<std::unique_ptr<HloInstruction>(
const HloInstruction*, const Shape&, absl::Span<HloInstruction* const>)>;
ChangeOpDataType(
absl::Span<std::pair<PrimitiveType, PrimitiveType> const> from_to_types,
HloPredicate op_matcher, HloCloner cloner = nullptr)
: op_matcher_(op_matcher), cloner_(cloner) {
for (const std::pair<PrimitiveType, PrimitiveType>& pair : from_to_types) {
to_type_map_[pair.first] = pair.second;
}
}
ChangeOpDataType(PrimitiveType from_ty, PrimitiveType to_ty,
HloPredicate op_matcher, HloCloner cloner = nullptr)
: op_matcher_(op_matcher), cloner_(cloner) {
to_type_map_[from_ty] = to_ty;
}
absl::string_view name() const override { return "change-op-data-type"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::flat_hash_map<PrimitiveType, PrimitiveType> to_type_map_;
HloPredicate op_matcher_;
HloCloner cloner_;
};
}
#endif
#include "xla/service/change_op_data_type.h"
#include <optional>
#include "xla/service/hlo_creation_utils.h"
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_matmul_rewriter.h"
#endif
namespace xla {
namespace {
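// Returns the element type shared by all operands of `instr`, or nullopt if
// the operands do not all have the same element type.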
std::optional<PrimitiveType> GetUniformOperandType(
const HloInstruction* instr) {
std::optional<PrimitiveType> type;
for (const HloInstruction* operand : instr->operands()) {
if (!type.has_value()) {
type = operand->shape().element_type();
} else if (operand->shape().element_type() != type.value()) {
return std::nullopt;
}
}
return type;
}
}
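// Walks the module's non-fusion computations and, for every matching
// array-shaped instruction (parameters excluded) whose operands share an
// element type listed in `to_type_map_`, converts the operands, clones the
// instruction with the new element type, and converts the result back.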
absl::StatusOr<bool> ChangeOpDataType::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
HloCloner default_cloner = [](const HloInstruction* inst, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
return inst->CloneWithNewOperands(shape, operands);
};
HloCloner cloner = cloner_ ? cloner_ : default_cloner;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
std::optional<PrimitiveType> operand_type = GetUniformOperandType(instr);
if (!op_matcher_(instr) || !operand_type.has_value() ||
!instr->shape().IsArray() ||
instr->opcode() == HloOpcode::kParameter) {
continue;
}
const PrimitiveType from_type = *operand_type;
auto it = to_type_map_.find(from_type);
if (it == to_type_map_.end()) {
continue;
}
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
if (instr->opcode() == HloOpcode::kDot &&
cpu::OneDnnMatMulRewriter::ShouldRewrite(instr)) {
continue;
}
#endif
const PrimitiveType to_type = it->second;
absl::InlinedVector<HloInstruction*, 8> new_operands;
for (HloInstruction* operand : instr->mutable_operands()) {
new_operands.push_back(MakeConvertToHlo(operand, to_type));
}
Shape new_shape = instr->shape();
new_shape.set_element_type(to_type);
HloInstruction* new_instr =
comp->AddInstruction(cloner(instr, new_shape, new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(
instr, MakeConvertToHlo(new_instr, from_type)));
changed = true;
}
}
return changed;
}
} | #include "xla/service/change_op_data_type.h"
#include <string>
#include <tuple>
#include <vector>
#include "absl/types/span.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class ChangeOpDataTypeTest : public HloTestBase {
public:
ChangeOpDataTypeTest()
: HloTestBase(false,
false) {}
};
TEST_F(ChangeOpDataTypeTest, Simple) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(F16, F32, HloPredicateTrue);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10})))
.WithShape(F16, {10})));
}
TEST_F(ChangeOpDataTypeTest, AllTypesMustBeSame) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = f16[1] dynamic-slice(f16[10] parameter(0), s32[1] parameter(1)), dynamic_slice_sizes={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(F16, F32, HloPredicateTrue);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ChangeOpDataTypeTest, DotAndConv) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
dot = f16[10,10] dot(f16[10,10] parameter(0), f16[10,10] parameter(1)),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
conv = f16[1,2,1] convolution(f16[1,2,1] parameter(2), f16[1,1,1] parameter(3)),
window={size=1}, dim_labels=b0f_0io->b0f
root = tuple(dot, conv)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(
F16, F32, HloPredicateIsOp<HloOpcode::kDot, HloOpcode::kConvolution>);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Convert(
m::Dot(m::Convert(m::Parameter(0)).WithShape(F32, {10, 10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10, 10})))
.WithShape(F16, {10, 10}),
m::Convert(m::Convolution(
m::Convert(m::Parameter(2)).WithShape(F32, {1, 2, 1}),
m::Convert(m::Parameter(3)).WithShape(F32, {1, 1, 1})))
.WithShape(F16, {1, 2, 1}))));
}
TEST_F(ChangeOpDataTypeTest, SimpleWithCloner) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
HloPredicate matcher = HloPredicateTrue;
int count = 0;
ChangeOpDataType::HloCloner cloner =
[&count](const HloInstruction* instr, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
count++;
return instr->CloneWithNewOperands(shape, operands);
};
ChangeOpDataType pass(F16, F32, matcher, cloner);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_EQ(count, 1);
}
TEST_F(ChangeOpDataTypeTest, SimpleWithMultipleTypes) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
op1 = add(f16[10] parameter(0), f16[10] parameter(1))
op2 = add(u16[10] parameter(2), u16[10] parameter(3))
ROOT tup = (f16[10], u16[10]) tuple(op1, op2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
HloPredicate matcher = HloPredicateTrue;
ChangeOpDataType pass({{F16, F32}, {U16, U32}}, matcher);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(root->operand_count(), 2);
EXPECT_THAT(
root->operand(0),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10})))
.WithShape(F16, {10})));
EXPECT_THAT(
root->operand(1),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(2)).WithShape(U32, {10}),
m::Convert(m::Parameter(3)).WithShape(U32, {10})))
.WithShape(U16, {10})));
}
}
} |
1,843 | cpp | tensorflow/tensorflow | fusion_node_indexing_evaluation | third_party/xla/xla/service/fusion_node_indexing_evaluation.cc | third_party/xla/xla/service/fusion_node_indexing_evaluation_test.cc | #ifndef XLA_SERVICE_FUSION_NODE_INDEXING_EVALUATION_H_
#define XLA_SERVICE_FUSION_NODE_INDEXING_EVALUATION_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/types.h"
namespace xla {
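// Estimates how much code duplication fusing a producer into `fusion_` would
// cause, by tracking for each instruction the set of users that index into it
// ("indexing users") and how many times each instruction gets emitted.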
class FusionNodeIndexingEvaluation {
public:
explicit FusionNodeIndexingEvaluation(const HloInstruction* fusion,
int64_t root_usage_count = 1);
bool CodeDuplicationTooHigh(const HloInstruction* producer) const;
bool MaxCodeDuplicationTooHigh() const;
int64_t EvaluateEmittedInstructions(const HloInstruction* producer) const;
void UpdateEvaluationCache(
const HloInstruction* producer,
absl::flat_hash_set<const HloInstruction*> indexing_users_of_producer);
absl::flat_hash_set<const HloInstruction*> RemoveFusionOperand(
HloInstruction* fusion_operand);
private:
static const int64_t kAllowedCodeDuplication;
void RecomputeCache();
void UpdateIndexUsageCount(const HloInstruction* instruction);
void UpdateIndexingUsersOfOperands(const HloInstruction* instruction);
absl::flat_hash_map<const HloInstruction*,
absl::flat_hash_set<const HloInstruction*>>
indexing_users_;
absl::flat_hash_map<const HloInstruction*, int64_t> index_usage_count_;
const HloInstruction* fusion_;
};
}
#endif
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
FusionNodeIndexingEvaluation::FusionNodeIndexingEvaluation(
const HloInstruction* fusion, int64_t root_usage_count)
: fusion_(fusion) {
HloInstruction* root = fusion->fused_expression_root();
indexing_users_[root].insert(fusion);
index_usage_count_[fusion] = root_usage_count;
RecomputeCache();
}
const int64_t FusionNodeIndexingEvaluation::kAllowedCodeDuplication = 15;
namespace {
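// Counts the users of `hlo`; users that are fusion instructions are looked
// through by counting the users of the corresponding fused parameter instead.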
int64_t UserCount(const HloInstruction* hlo) {
int64_t cnt = 0;
for (HloInstruction* user : hlo->users()) {
if (user->opcode() == HloOpcode::kFusion) {
int64_t operand_index = user->operand_index(hlo);
cnt += user->fused_parameter(operand_index)->user_count();
} else {
++cnt;
}
}
return cnt;
}
}
bool FusionNodeIndexingEvaluation::CodeDuplicationTooHigh(
const HloInstruction* producer) const {
if (producer->opcode() == HloOpcode::kBroadcast) {
return false;
}
int64_t emitted_instructions = EvaluateEmittedInstructions(producer);
return emitted_instructions > kAllowedCodeDuplication ||
(ElementalIrEmitter::OpInvalidatesCache(producer) &&
(emitted_instructions > 1 || UserCount(producer) > 1));
}
bool FusionNodeIndexingEvaluation::MaxCodeDuplicationTooHigh() const {
for (const auto& entry : index_usage_count_) {
if (entry.second > kAllowedCodeDuplication ||
(ElementalIrEmitter::OpInvalidatesCache(entry.first) &&
(entry.second > 1 || UserCount(entry.first) > 1))) {
return true;
}
}
return false;
}
int64_t FusionNodeIndexingEvaluation::EvaluateEmittedInstructions(
const HloInstruction* producer) const {
int64_t total = 0;
for (const auto* user : indexing_users_.at(producer)) {
total += index_usage_count_.at(user);
}
return total;
}
void FusionNodeIndexingEvaluation::UpdateEvaluationCache(
const HloInstruction* producer,
absl::flat_hash_set<const HloInstruction*> indexing_users_of_producer) {
CHECK(!indexing_users_.contains(producer));
indexing_users_[producer] = std::move(indexing_users_of_producer);
UpdateIndexUsageCount(producer);
UpdateIndexingUsersOfOperands(producer);
}
absl::flat_hash_set<const HloInstruction*>
FusionNodeIndexingEvaluation::RemoveFusionOperand(
HloInstruction* fusion_operand) {
auto indexing_users_of_operand =
std::move(indexing_users_.at(fusion_operand));
indexing_users_.erase(fusion_operand);
CHECK(!index_usage_count_.contains(fusion_operand));
return indexing_users_of_operand;
}
void FusionNodeIndexingEvaluation::RecomputeCache() {
auto postorder =
fusion_->fused_instructions_computation()->MakeInstructionPostOrder();
std::reverse(postorder.begin(), postorder.end());
for (const auto* instruction : postorder) {
if (instruction->opcode() == HloOpcode::kParameter) {
continue;
}
UpdateIndexUsageCount(instruction);
UpdateIndexingUsersOfOperands(instruction);
}
}
void FusionNodeIndexingEvaluation::UpdateIndexUsageCount(
const HloInstruction* instruction) {
int64_t total = 0;
for (const auto* user : indexing_users_[instruction]) {
total += index_usage_count_.at(user);
}
CHECK(index_usage_count_.emplace(instruction, total).second);
}
void FusionNodeIndexingEvaluation::UpdateIndexingUsersOfOperands(
const HloInstruction* instruction) {
for (const auto* operand : instruction->operands()) {
if (operand->opcode() == HloOpcode::kParameter) {
operand = fusion_->operand(operand->parameter_number());
}
if (instruction->opcode() == HloOpcode::kTranspose ||
Shape::Equal().IgnoreElementType()(operand->shape(),
instruction->shape())) {
indexing_users_[operand].insert(indexing_users_[instruction].begin(),
indexing_users_[instruction].end());
} else {
indexing_users_[operand].insert(instruction);
}
}
}
} | #include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/instruction_fusion.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
using FusionNodeIndexingEvaluationTest = HloTestBase;
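// Test-only InstructionFusion subclass that keeps a
// FusionNodeIndexingEvaluation per fusion instruction so tests can query the
// emitted-instruction estimates before and after fusing a producer.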
class InstructionFusionForTesting : public InstructionFusion {
public:
explicit InstructionFusionForTesting()
: InstructionFusion(InstructionFusion::IsExpensive) {}
HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
HloInstruction* producer) override {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation =
fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
HloComputation* computation) override {
return InstructionFusion::Fuse(producer, consumer, computation);
}
int64_t EvaluateEmittedInstructions(const HloInstruction* producer,
const HloInstruction* consumer) {
if (consumer->opcode() != HloOpcode::kFusion) {
return 0;
}
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
return fusion_node_evaluations_.at(consumer).EvaluateEmittedInstructions(
producer);
}
const FusionNodeIndexingEvaluation* GetFusionNodeEvaluation(
const HloInstruction* consumer) {
auto it = fusion_node_evaluations_.find(consumer);
if (it == fusion_node_evaluations_.end()) {
return nullptr;
}
return &it->second;
}
private:
absl::flat_hash_map<const HloInstruction*, FusionNodeIndexingEvaluation>
fusion_node_evaluations_;
};
TEST_F(FusionNodeIndexingEvaluationTest, FuseTwoInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4,3]{1,0} parameter(0)
add = f32[4,3]{1,0} add(p0, p0)
ROOT sub = f32[4,3]{1,0} subtract(add, p0)
})")
.value();
HloInstruction* sub = module->entry_computation()->root_instruction();
HloInstruction* add = sub->mutable_operand(0);
InstructionFusionForTesting().Fuse(add, sub, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, FuseThreeInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
slice1 = f32[3]{0} slice(p0), slice={[0:3]}
slice2 = f32[3]{0} slice(p0), slice={[0:3]}
ROOT sub = f32[3]{0} subtract(slice1, slice2)
})")
.value();
HloInstruction* sub = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* slice1 = sub->mutable_operand(0);
HloInstruction* slice2 = sub->mutable_operand(1);
auto fusion =
instruction_fusion.Fuse(slice1, sub, module->entry_computation());
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2, fusion), 1);
instruction_fusion.Fuse(slice2, fusion, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, ExponentialDuplicationPattern) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
p1 = f32[4]{0} parameter(1)
add0 = f32[4]{0} add(p0, p1)
slice1.0 = f32[3]{0} slice(add0), slice={[0:3]}
slice1.1 = f32[3]{0} slice(add0), slice={[1:4]}
add1 = f32[3]{0} add(slice1.0, slice1.1)
slice2.0 = f32[2]{0} slice(add1), slice={[0:2]}
slice2.1 = f32[2]{0} slice(add1), slice={[1:3]}
ROOT add2 = f32[2]{0} add(slice2.0, slice2.1)
})")
.value();
HloInstruction* add2 = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* slice2_0 = add2->mutable_operand(0);
HloInstruction* slice2_1 = add2->mutable_operand(1);
auto fusion =
instruction_fusion.Fuse(slice2_0, add2, module->entry_computation());
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2_1, fusion),
1);
instruction_fusion.Fuse(slice2_1, fusion, module->entry_computation());
HloInstruction* add1 = fusion->mutable_operand(0);
EXPECT_EQ(add1->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add1, fusion), 2);
instruction_fusion.Fuse(add1, fusion, module->entry_computation());
HloInstruction* slice1_0 = fusion->mutable_operand(0);
EXPECT_EQ(slice1_0->opcode(), HloOpcode::kSlice);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_0, fusion),
2);
instruction_fusion.Fuse(slice1_0, fusion, module->entry_computation());
HloInstruction* slice1_1 = fusion->mutable_operand(0);
EXPECT_EQ(slice1_1->opcode(), HloOpcode::kSlice);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_1, fusion),
2);
instruction_fusion.Fuse(slice1_1, fusion, module->entry_computation());
HloInstruction* add0 = fusion->mutable_operand(0);
EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
instruction_fusion.Fuse(add0, fusion, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, RecomputeCache) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%fused_computation (param_0.5: f32[4]) -> f32[2] {
%param_0.5 = f32[4]{0} parameter(0)
%slice1.2 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[0:3]}
%slice1.3 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[1:4]}
%add1.1 = f32[3]{0} add(f32[3]{0} %slice1.2, f32[3]{0} %slice1.3)
%slice2.2 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[0:2]}
%slice2.3 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[1:3]}
ROOT %add2.1 = f32[2]{0} add(f32[2]{0} %slice2.2, f32[2]{0} %slice2.3)
}
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
p1 = f32[4]{0} parameter(1)
add0 = f32[4]{0} add(p0, p1)
ROOT %fusion = f32[2]{0} fusion(add0), kind=kLoop, calls=%fused_computation
})")
.value();
HloInstruction* fusion = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* add0 = fusion->mutable_operand(0);
EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
}
TEST_F(FusionNodeIndexingEvaluationTest, CodeDuplicationTooHigh) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%fused_computation (param: f32[6]) -> f32[2] {
%param = f32[6]{0} parameter(0)
%slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]}
%add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
%slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]}
%add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
%slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]}
%add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
%slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
}
ENTRY entry_computation {
p0 = f32[] parameter(0)
add = f32[] add(p0, p0)
broadcast = f32[6]{0} broadcast(add), dimensions={}
ROOT %fusion = f32[2]{0} fusion(broadcast), kind=kLoop, calls=%fused_computation
})")
.value();
HloInstruction* fusion = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* broadcast = fusion->mutable_operand(0);
EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(broadcast, fusion),
16);
EXPECT_FALSE(instruction_fusion.GetFusionNodeEvaluation(fusion)
->CodeDuplicationTooHigh(broadcast));
instruction_fusion.Fuse(broadcast, fusion, module->entry_computation());
HloInstruction* add = fusion->mutable_operand(0);
EXPECT_EQ(add->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add, fusion), 16);
EXPECT_TRUE(instruction_fusion.GetFusionNodeEvaluation(fusion)
->CodeDuplicationTooHigh(add));
}
} |
1,844 | cpp | tensorflow/tensorflow | convolution_group_converter | third_party/xla/xla/service/convolution_group_converter.cc | third_party/xla/xla/service/convolution_group_converter_test.cc | #ifndef XLA_SERVICE_CONVOLUTION_GROUP_CONVERTER_H_
#define XLA_SERVICE_CONVOLUTION_GROUP_CONVERTER_H_
#include <functional>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/status_macros.h"
namespace xla {
class ConvolutionGroupConverter : public HloModulePass {
public:
ConvolutionGroupConverter(std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only,
bool filter_expansion = true)
: should_expand_(should_expand),
is_cost_viable_(is_cost_viable),
convert_batch_groups_only_(convert_batch_groups_only),
filter_expansion_(filter_expansion) {}
absl::string_view name() const override {
return "convolution-group-converter";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
std::function<bool(HloInstruction*)> should_expand_;
std::function<bool(HloInstruction*)> is_cost_viable_;
bool convert_batch_groups_only_;
bool filter_expansion_;
};
}
#endif
#include "xla/service/convolution_group_converter.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
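// Visitor that rewrites convolutions whose feature_group_count or
// batch_group_count is greater than one into equivalent convolutions that the
// rest of the pipeline can handle.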
class ConvolutionVisitor : public DfsHloVisitorWithDefault {
public:
absl::Status DefaultAction(HloInstruction* ) override {
return absl::OkStatus();
}
absl::Status HandleConvolution(HloInstruction* convolution) override;
absl::Status HandleBatchGroupCount(HloInstruction* convolution);
static bool Run(HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion);
const bool changed() const { return changed_; }
~ConvolutionVisitor() override = default;
private:
explicit ConvolutionVisitor(
HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion)
: computation_(computation),
filter_expansion_(filter_expansion),
convert_batch_groups_only_(convert_batch_groups_only),
should_expand_(should_expand),
is_cost_viable_(is_cost_viable) {}
HloComputation* computation_;
bool changed_ = false;
bool filter_expansion_;
bool convert_batch_groups_only_;
std::function<bool(HloInstruction*)> should_expand_;
std::function<bool(HloInstruction*)> is_cost_viable_;
};
bool ConvolutionVisitor::Run(
HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion) {
ConvolutionVisitor visitor(computation, should_expand, is_cost_viable,
convert_batch_groups_only, filter_expansion);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed_;
}
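// Returns `shape` with its input-feature dimension scaled up by `group_count`.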
Shape ExpandedFilterShape(const Shape& shape, int64_t group_count,
int64_t input_feature_dim) {
int64_t num_dims = shape.dimensions_size();
CHECK_GE(num_dims, 2);
Shape expanded_shape = shape;
expanded_shape.set_dimensions(
input_feature_dim, shape.dimensions(input_feature_dim) * group_count);
return expanded_shape;
}
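// Returns group ids 0..group_count-1, each repeated `group_size` times; used
// to build the filter mask below.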
std::vector<int32_t> GetMaskIds(int64_t group_size, int64_t group_count) {
std::vector<int32_t> values;
values.reserve(group_count * group_size);
for (int i = 0; i < group_count; ++i) {
for (int j = 0; j < group_size; ++j) {
values.push_back(i);
}
}
return values;
}
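// Builds a predicate mask over the expanded filter that is true exactly on the
// block-diagonal entries, i.e. where the input-feature group id equals the
// output-feature group id.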
HloInstruction* GetExpandedFilterMask(
const Shape& filter_shape, int64_t kernel_input_feature_dim,
int64_t kernel_output_feature_dim, int64_t group_count,
const std::function<HloInstruction*(std::unique_ptr<HloInstruction>)>&
add_instruction) {
Shape expanded_filter_shape =
ExpandedFilterShape(filter_shape, group_count, kernel_input_feature_dim);
Shape mask_shape =
ShapeUtil::MakeShape(S32, expanded_filter_shape.dimensions());
int64_t output_feature = filter_shape.dimensions(kernel_output_feature_dim);
int64_t group_size = filter_shape.dimensions(kernel_input_feature_dim);
const std::vector<int32_t> input_feature_filter_mask =
GetMaskIds(group_size, group_count);
const std::vector<int32_t> output_feature_filter_mask =
GetMaskIds(output_feature / group_count, group_count);
auto mask1 = add_instruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(input_feature_filter_mask)));
auto broadcasted_mask1 = add_instruction(HloInstruction::CreateBroadcast(
mask_shape, mask1, {kernel_input_feature_dim}));
auto mask2 = add_instruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(output_feature_filter_mask)));
auto broadcasted_mask2 = add_instruction(HloInstruction::CreateBroadcast(
mask_shape, mask2, {kernel_output_feature_dim}));
Shape predicate_shape =
ShapeUtil::MakeShape(PRED, expanded_filter_shape.dimensions());
return add_instruction(HloInstruction::CreateCompare(
predicate_shape, broadcasted_mask1, broadcasted_mask2,
ComparisonDirection::kEq));
}
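// Rewrites a convolution with batch_group_count > 1: when the input batch and
// output feature sizes differ from the group count, the grouped dimensions are
// reshaped into an extra spatial dimension; otherwise the output is expanded,
// masked with the group mask, and collapsed with a reduce-window.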
absl::Status ConvolutionVisitor::HandleBatchGroupCount(
HloInstruction* convolution) {
auto dim_numbers = convolution->convolution_dimension_numbers();
auto activation = convolution->mutable_operand(0);
auto filter = convolution->mutable_operand(1);
int64_t batch_group_count = convolution->batch_group_count();
if (batch_group_count == 1 ||
(should_expand_ && !should_expand_(convolution))) {
return absl::OkStatus();
}
VLOG(2) << "Dealing with batch_group_count " << batch_group_count
<< " for convolution " << convolution->ToString() << "\n";
auto add = [&](std::unique_ptr<HloInstruction> inst) {
return computation_->AddInstruction(std::move(inst));
};
int64_t input_batch_dimension = dim_numbers.input_batch_dimension();
const int64_t input_feature_dimension = dim_numbers.input_feature_dimension();
int64_t output_batch_dimension = dim_numbers.output_batch_dimension();
int64_t output_feature_dimension = dim_numbers.output_feature_dimension();
const int64_t kernel_input_feature_dimension =
dim_numbers.kernel_input_feature_dimension();
const int64_t kernel_output_feature_dimension =
dim_numbers.kernel_output_feature_dimension();
const int64_t input_batch =
activation->shape().dimensions(input_batch_dimension);
const int64_t output_feature =
filter->shape().dimensions(kernel_output_feature_dimension);
if (output_feature != batch_group_count || input_batch != batch_group_count) {
std::vector<int64_t> input_sizes(activation->shape().dimensions().begin(),
activation->shape().dimensions().end());
input_sizes[input_batch_dimension] /= batch_group_count;
input_sizes.insert(input_sizes.begin() + input_batch_dimension,
batch_group_count);
activation = MakeReshapeHlo(input_sizes, activation).value();
for (auto& d : *dim_numbers.mutable_input_spatial_dimensions()) {
if (d > input_batch_dimension) {
++d;
}
}
dim_numbers.add_input_spatial_dimensions(input_batch_dimension);
dim_numbers.set_input_batch_dimension(input_batch_dimension + 1);
if (input_feature_dimension > input_batch_dimension) {
dim_numbers.set_input_feature_dimension(input_feature_dimension + 1);
}
std::vector<int64_t> kernel_sizes(filter->shape().dimensions().begin(),
filter->shape().dimensions().end());
kernel_sizes[kernel_output_feature_dimension] /= batch_group_count;
kernel_sizes.insert(kernel_sizes.begin() + kernel_output_feature_dimension,
batch_group_count);
filter = MakeReshapeHlo(kernel_sizes, filter).value();
for (auto& d : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (d > kernel_output_feature_dimension) {
++d;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dimension);
dim_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dimension + 1);
if (kernel_input_feature_dimension > kernel_output_feature_dimension) {
dim_numbers.set_kernel_input_feature_dimension(
kernel_input_feature_dimension + 1);
}
for (auto& d : *dim_numbers.mutable_output_spatial_dimensions()) {
if (d > output_feature_dimension) {
++d;
}
}
dim_numbers.add_output_spatial_dimensions(output_feature_dimension);
dim_numbers.set_output_feature_dimension(output_feature_dimension + 1);
if (output_batch_dimension > output_feature_dimension) {
dim_numbers.set_output_batch_dimension(output_batch_dimension + 1);
}
Window window = convolution->window();
auto window_dim = window.add_dimensions();
window_dim->set_base_dilation(batch_group_count);
window_dim->set_size(batch_group_count);
window_dim->set_stride(batch_group_count - 1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
HloInstruction* new_convolution =
MakeConvolveHlo(
activation, filter, convolution->feature_group_count(),
1, window, dim_numbers,
convolution->precision_config(),
convolution->shape().element_type())
.value();
convolution->SetupDerivedInstruction(new_convolution);
TF_CHECK_OK(computation_->ReplaceInstruction(
convolution,
MakeReshapeHlo(convolution->shape(), new_convolution).value()));
changed_ = true;
return absl::OkStatus();
}
VLOG(2) << "is_cost_viable_ " << is_cost_viable_(convolution);
const bool cost_too_high = !is_cost_viable_(convolution);
if (cost_too_high || filter_expansion_) {
HloInstruction* filter_mask =
GetExpandedFilterMask(convolution->shape(), output_batch_dimension,
output_feature_dimension, batch_group_count, add);
auto expanded_filter_shape = ExpandedFilterShape(
convolution->shape(), batch_group_count, output_batch_dimension);
VLOG(2) << "output_batch_dimension " << output_batch_dimension;
VLOG(2) << "New output shape of convolution "
<< expanded_filter_shape.ToString();
auto new_convolution = add(HloInstruction::CreateConvolve(
expanded_filter_shape, activation, filter,
1, 1,
convolution->window(), dim_numbers, convolution->precision_config()));
VLOG(2) << "Expanded convolution " << new_convolution->ToString();
auto zero = add(HloInstruction::CreateConstant(
LiteralUtil::Zero(expanded_filter_shape.element_type())));
auto zero_filter =
add(HloInstruction::CreateBroadcast(expanded_filter_shape, zero, {}));
auto new_filter = add(HloInstruction::CreateTernary(
expanded_filter_shape, HloOpcode::kSelect, filter_mask, new_convolution,
zero_filter));
PrimitiveType reduce_type = new_filter->shape().element_type();
auto reduce_window_shape = new_convolution->shape();
reduce_window_shape.set_dimensions(output_batch_dimension, 1);
if (primitive_util::BitWidth(reduce_type) < primitive_util::BitWidth(F32)) {
reduce_type = F32;
reduce_window_shape.set_element_type(F32);
Shape convert_shape = new_filter->shape();
convert_shape.set_element_type(F32);
new_filter =
add(HloInstruction::CreateConvert(convert_shape, new_filter));
}
auto zero_literal = LiteralUtil::Zero(reduce_type);
auto zero_scalar =
add(HloInstruction::CreateConstant(std::move(zero_literal)));
auto reduce_function = [&]() -> HloComputation* {
HloComputation::Builder b("add_computation");
Shape shape = ShapeUtil::MakeShape(reduce_type, {});
auto lhs =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "lhs"));
auto rhs =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "rhs"));
auto scalar_op = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, lhs, rhs));
return computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
};
Window window;
for (int64_t i = 0; i < new_convolution->shape().dimensions_size(); ++i) {
auto* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
if (i == output_batch_dimension) {
dim->set_stride(batch_group_count);
dim->set_size(batch_group_count);
} else {
dim->set_stride(1);
dim->set_size(1);
}
}
auto reduce_window = add(HloInstruction::CreateReduceWindow(
reduce_window_shape, new_filter, zero_scalar, window,
reduce_function()));
Shape convert_back_shape = reduce_window->shape();
convert_back_shape.set_element_type(activation->shape().element_type());
auto reduce_window_converted =
HloInstruction::CreateConvert(convert_back_shape, reduce_window);
TF_CHECK_OK(computation_->ReplaceWithNewInstruction(
convolution, std::move(reduce_window_converted)));
changed_ = true;
}
return absl::OkStatus();
}
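// Rewrites a convolution with feature_group_count > 1. Depthwise cases
// (group_size == 1) either expand the filter under a mask or reshape it with a
// depthwise-multiplier dimension; the general case splits the grouped feature
// dimension into an extra spatial dimension.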
absl::Status ConvolutionVisitor::HandleConvolution(
HloInstruction* convolution) {
if (convert_batch_groups_only_) {
return HandleBatchGroupCount(convolution);
}
auto add = [&](std::unique_ptr<HloInstruction> inst) {
return computation_->AddInstruction(std::move(inst));
};
int64_t group_count = convolution->feature_group_count();
if (group_count == 1 || (should_expand_ && !should_expand_(convolution))) {
return absl::OkStatus();
}
changed_ = true;
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
auto filter = convolution->mutable_operand(1);
int64_t kernel_input_feature_dim =
dim_numbers.kernel_input_feature_dimension();
int64_t group_size = filter->shape().dimensions(kernel_input_feature_dim);
int64_t kernel_output_feature_dim =
dim_numbers.kernel_output_feature_dimension();
auto expanded_filter_shape = ExpandedFilterShape(filter->shape(), group_count,
kernel_input_feature_dim);
HloInstruction* filter_mask =
GetExpandedFilterMask(filter->shape(), kernel_input_feature_dim,
kernel_output_feature_dim, group_count, add);
HloInstruction* expanded_filter;
if (group_size == 1) {
bool depthwise_separable =
(group_count == filter->shape().dimensions(kernel_output_feature_dim));
if (!filter_expansion_ && depthwise_separable) {
changed_ = false;
return absl::OkStatus();
}
VLOG(2) << "is_cost_viable_ " << is_cost_viable_(convolution);
if (!is_cost_viable_(convolution) || filter_expansion_) {
Shape reshaped_filter_shape =
ShapeUtil::DeleteDimension(kernel_input_feature_dim, filter->shape());
auto reshaped_filter =
add(HloInstruction::CreateReshape(reshaped_filter_shape, filter));
std::vector<int64_t> broadcast_dims;
for (int64_t i = 0; i < filter->shape().dimensions_size(); ++i) {
if (i == kernel_input_feature_dim) {
continue;
}
broadcast_dims.push_back(i);
}
expanded_filter = add(HloInstruction::CreateBroadcast(
expanded_filter_shape, reshaped_filter, broadcast_dims));
auto zero = add(HloInstruction::CreateConstant(
LiteralUtil::Zero(expanded_filter_shape.element_type())));
auto zero_filter =
add(HloInstruction::CreateBroadcast(expanded_filter_shape, zero, {}));
auto new_filter = add(HloInstruction::CreateTernary(
expanded_filter_shape, HloOpcode::kSelect, filter_mask,
expanded_filter, zero_filter));
auto new_convolution = HloInstruction::CreateConvolve(
convolution->shape(), convolution->mutable_operand(0), new_filter,
1, 1,
convolution->window(), dim_numbers, convolution->precision_config());
return computation_->ReplaceWithNewInstruction(
convolution, std::move(new_convolution));
}
std::vector<int64_t> new_filter_dimension;
new_filter_dimension.reserve(filter->shape().rank() + 1);
const int64_t depthwise_multiplier =
filter->shape().dimensions(kernel_output_feature_dim) / group_count;
for (int64_t i = 0; i < filter->shape().rank(); ++i) {
if (i == kernel_output_feature_dim) {
new_filter_dimension.push_back(group_count);
new_filter_dimension.push_back(depthwise_multiplier);
} else {
new_filter_dimension.push_back(filter->shape().dimensions(i));
}
}
if (kernel_input_feature_dim > kernel_output_feature_dim) {
dim_numbers.set_kernel_input_feature_dimension(kernel_input_feature_dim +
1);
}
for (auto& dim : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (dim > kernel_output_feature_dim) {
++dim;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dim + 1);
HloInstruction* new_filter =
computation_->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(filter->shape().element_type(),
new_filter_dimension),
filter));
auto new_activation_shape = convolution->operand(0)->shape();
dim_numbers.add_input_spatial_dimensions(new_activation_shape.rank());
ShapeUtil::AppendMajorDimension(1, &new_activation_shape);
HloInstruction* new_activation =
computation_->AddInstruction(HloInstruction::CreateReshape(
new_activation_shape, convolution->mutable_operand(0)));
auto new_window = convolution->window();
auto new_dim = new_window.add_dimensions();
new_dim->set_size(depthwise_multiplier);
new_dim->set_window_reversal(true);
new_dim->set_padding_low(depthwise_multiplier - 1);
new_dim->set_padding_high(depthwise_multiplier - 1);
new_dim->set_stride(1);
new_dim->set_window_dilation(1);
new_dim->set_base_dilation(1);
std::vector<int64_t> new_output_dimension;
new_output_dimension.reserve(convolution->shape().rank() + 1);
for (int64_t i = 0; i < convolution->shape().rank(); ++i) {
if (i == dim_numbers.output_feature_dimension()) {
new_output_dimension.push_back(group_count);
new_output_dimension.push_back(depthwise_multiplier);
} else {
new_output_dimension.push_back(convolution->shape().dimensions(i));
}
}
if (dim_numbers.output_batch_dimension() >
dim_numbers.output_feature_dimension()) {
dim_numbers.set_output_batch_dimension(
dim_numbers.output_batch_dimension() + 1);
}
for (auto& dim : *dim_numbers.mutable_output_spatial_dimensions()) {
if (dim > dim_numbers.output_feature_dimension()) {
++dim;
}
}
dim_numbers.add_output_spatial_dimensions(
dim_numbers.output_feature_dimension() + 1);
auto new_convolution_output_shape = ShapeUtil::MakeShape(
convolution->shape().element_type(), new_output_dimension);
HloInstruction* new_convolution =
computation_->AddInstruction(HloInstruction::CreateConvolve(
new_convolution_output_shape, new_activation, new_filter,
group_count, 1,
new_window, dim_numbers, convolution->precision_config()));
return computation_->ReplaceWithNewInstruction(
convolution,
HloInstruction::CreateReshape(convolution->shape(), new_convolution));
}
HloInstruction* activation = convolution->mutable_operand(0);
std::vector<int64_t> input_sizes(activation->shape().dimensions().begin(),
activation->shape().dimensions().end());
const int64_t input_feature_dimension = dim_numbers.input_feature_dimension();
input_sizes[input_feature_dimension] /= group_count;
input_sizes.insert(input_sizes.begin() + input_feature_dimension,
group_count);
activation = MakeReshapeHlo(input_sizes, activation).value();
for (auto& d : *dim_numbers.mutable_input_spatial_dimensions()) {
if (d > input_feature_dimension) {
++d;
}
}
dim_numbers.add_input_spatial_dimensions(input_feature_dimension);
dim_numbers.set_input_feature_dimension(input_feature_dimension + 1);
if (dim_numbers.input_batch_dimension() > input_feature_dimension) {
dim_numbers.set_input_batch_dimension(dim_numbers.input_batch_dimension() +
1);
}
std::vector<int64_t> kernel_sizes(filter->shape().dimensions().begin(),
filter->shape().dimensions().end());
const int64_t kernel_output_feature_dimension =
dim_numbers.kernel_output_feature_dimension();
kernel_sizes[kernel_output_feature_dimension] /= group_count;
kernel_sizes.insert(kernel_sizes.begin() + kernel_output_feature_dimension,
group_count);
filter = MakeReshapeHlo(kernel_sizes, filter).value();
for (auto& d : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (d > kernel_output_feature_dimension) {
++d;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dimension);
dim_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dimension + 1);
if (dim_numbers.kernel_input_feature_dimension() >
kernel_output_feature_dimension) {
dim_numbers.set_kernel_input_feature_dimension(
dim_numbers.kernel_input_feature_dimension() + 1);
}
const int64_t output_feature_dimension =
dim_numbers.output_feature_dimension();
for (auto& d : *dim_numbers.mutable_output_spatial_dimensions()) {
if (d > output_feature_dimension) {
++d;
}
}
dim_numbers.add_output_spatial_dimensions(output_feature_dimension);
dim_numbers.set_output_feature_dimension(output_feature_dimension + 1);
if (dim_numbers.output_batch_dimension() > output_feature_dimension) {
dim_numbers.set_output_batch_dimension(
dim_numbers.output_batch_dimension() + 1);
}
Window window = convolution->window();
auto window_dim = window.add_dimensions();
window_dim->set_base_dilation(group_count);
window_dim->set_size(group_count);
window_dim->set_stride(group_count - 1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
HloInstruction* new_convolution =
MakeConvolveHlo(
activation, filter, 1,
1, window, dim_numbers,
convolution->precision_config(),
convolution->shape().element_type())
.value();
convolution->SetupDerivedInstruction(new_convolution);
changed_ = true;
return computation_->ReplaceInstruction(
convolution,
MakeReshapeHlo(convolution->shape(), new_convolution).value());
}
}
absl::StatusOr<bool> ConvolutionGroupConverter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "ConvolutionGroupConverter::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (ConvolutionVisitor::Run(comp, should_expand_, is_cost_viable_,
convert_batch_groups_only_,
filter_expansion_)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "ConvolutionGroupConverter::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/convolution_group_converter.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
using ConvolutionGroupConverterTest = HloTestBase;
namespace op = testing::opcode_matchers;
TEST_F(ConvolutionGroupConverterTest,
ConvertFeatureGroupCountEqualToInputFeatureDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,2], filter: f32[1,1,2]) -> f32[1,2,2] {
%input = f32[1,2,2]{2,1,0} parameter(0)
%copy = f32[1,2,2]{2,0,1} copy(f32[1,2,2]{2,1,0} %input)
%filter = f32[1,1,2]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,2]{2,0,1} convolution(f32[1,2,2]{2,0,1} %copy, f32[1,1,2]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, feature_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return true; };
ConvolutionGroupConverter converter(should_expand, cost_model,
false);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->feature_group_count(), 1);
EXPECT_THAT(root->operand(1),
op::Select(op::Eq(op::Broadcast(op::Constant()),
op::Broadcast(op::Constant())),
op::Broadcast(op::Reshape(op::Parameter())),
op::Broadcast(op::Constant())));
}
TEST_F(ConvolutionGroupConverterTest,
ConvertFeatureGroupCountDivisorOfInputFeatureDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,4], filter: f32[1,2,2]) -> f32[1,2,2] {
%input = f32[1,2,4]{2,1,0} parameter(0)
%copy = f32[1,2,4]{2,0,1} copy(f32[1,2,4]{2,1,0} %input)
%filter = f32[1,2,2]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,2]{2,0,1} convolution(f32[1,2,4]{2,0,1} %copy, f32[1,2,2]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, feature_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return true; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
false);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->operand(0)->feature_group_count(), 1);
EXPECT_EQ(root->operand(0)->shape().rank(), 4);
}
TEST_F(ConvolutionGroupConverterTest,
ConvertBatchGroupCountEqualToInputBatchDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[16,19,19,512]{3,2,1,0}, filter: f32[16,19,19,512]{3,2,1,0}) -> f32[3,3,512,1]{3,2,1,0} {
%input = f32[16,19,19,512]{3,2,1,0} parameter(0)
%filter = f32[16,19,19,512]{3,2,1,0} parameter(1)
ROOT %convolution = f32[3,3,512,1]{3,2,1,0} convolution(f32[16,19,19,512]{3,2,1,0} %input, f32[16,19,19,512]{3,2,1,0} %filter), window={size=19x19 pad=1_1x1_1}, dim_labels=f01b_i01o->01fb, batch_group_count=512
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return false; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
true);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvert);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kReduceWindow);
}
TEST_F(ConvolutionGroupConverterTest,
ConvertBatchGroupCountNotEqualToInputBatchDim) {
std::string hlo_string = R"(HloModule m
ENTRY main {
%input = f32[1,1,1,4] parameter(0)
%filter = f32[1,1,1,2] parameter(1)
ROOT %convolution = f32[1,1,2,2] convolution(%input,%filter),
window={size=1x1}, dim_labels=f01b_i01o->01fb, batch_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return false; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
true);
ASSERT_TRUE(converter.Run(module.get()).value());
}
}
} |
1,845 | cpp | tensorflow/tensorflow | optimize_input_output_buffer_alias | third_party/xla/xla/service/optimize_input_output_buffer_alias.cc | third_party/xla/xla/service/optimize_input_output_buffer_alias_test.cc | #ifndef XLA_SERVICE_OPTIMIZE_INPUT_OUTPUT_BUFFER_ALIAS_H_
#define XLA_SERVICE_OPTIMIZE_INPUT_OUTPUT_BUFFER_ALIAS_H_
#include <cstdint>
#include <functional>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
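// Pass that sets up input/output buffer aliasing for the entry computation by
// pairing parameter sub-buffers (optionally only registered buffer donors)
// with result sub-buffers of equal byte size.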
class OptimizeInputOutputBufferAlias : public HloModulePass {
public:
OptimizeInputOutputBufferAlias() = default;
explicit OptimizeInputOutputBufferAlias(
bool registered_buffer_donor_only,
std::function<int64_t(const Shape&)> shape_size_fn =
[](const Shape& shape) { return ShapeUtil::ByteSizeOf(shape); })
: registered_buffer_donor_only_(registered_buffer_donor_only),
shape_size_fn_(shape_size_fn) {}
~OptimizeInputOutputBufferAlias() override = default;
absl::string_view name() const override {
return "optimize_input_output_buffer_alias";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
friend class OptimizeInputOutputBufferAliasTest;
bool registered_buffer_donor_only_ = false;
absl::StatusOr<bool> Build(absl::Span<const Shape> input_shapes,
const Shape& output_shape,
HloInputOutputAliasConfig* alias_config,
HloBufferDonorConfig* buffer_donor_config);
std::function<int64_t(const Shape&)> shape_size_fn_ = [](const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape);
};
};
}
#endif
#include "xla/service/optimize_input_output_buffer_alias.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
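// Greedy matching: within each memory space, donor (parameter) and donee
// (output) buffers are sorted by decreasing size and equal-sized pairs are
// aliased, removing each used donor from the buffer-donor set.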
absl::StatusOr<bool> OptimizeInputOutputBufferAlias::Build(
absl::Span<const Shape> input_shapes, const Shape& output_shape,
HloInputOutputAliasConfig* alias_config,
HloBufferDonorConfig* buffer_donor_config) {
bool changed = false;
if (output_shape.is_dynamic()) {
return false;
}
struct DonorEntry {
int64_t param_number;
ShapeIndex index;
int64_t shape_size;
};
absl::flat_hash_map<int64_t, std::vector<DonorEntry>> donors;
for (int64_t param_number = 0; param_number < input_shapes.size();
++param_number) {
const Shape& input_shape = input_shapes[param_number];
TF_RET_CHECK(LayoutUtil::HasLayout(input_shape));
VLOG(1) << "input_shape: " << input_shape.ToString();
ShapeUtil::ForEachSubshape(input_shape, [&](const Shape& subshape,
const ShapeIndex& index) {
if (!LayoutUtil::IsDenseArray(subshape) || subshape.is_dynamic()) {
return;
}
if (alias_config->ParameterHasAlias(param_number, index)) {
return;
}
if (registered_buffer_donor_only_ &&
!buffer_donor_config->ParameterIsBufferDonor(param_number, index)) {
return;
}
int64_t memory_space = subshape.layout().memory_space();
donors[memory_space].emplace_back(
DonorEntry{param_number, index, shape_size_fn_(subshape)});
});
}
struct DoneeEntry {
ShapeIndex index;
int64_t shape_size;
};
absl::flat_hash_map<int64_t, std::vector<DoneeEntry>> donees;
TF_RET_CHECK(LayoutUtil::HasLayout(output_shape));
VLOG(1) << "output_shape: " << output_shape.ToString();
ShapeUtil::ForEachSubshape(
output_shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (!LayoutUtil::IsDenseArray(subshape)) {
return;
}
if (alias_config->OutputHasAlias(index)) {
return;
}
int64_t memory_space = subshape.layout().memory_space();
donees[memory_space].emplace_back(
DoneeEntry{index, shape_size_fn_(subshape)});
});
for (auto& [memory_space, donor_vector] : donors) {
auto donee_it = donees.find(memory_space);
if (donee_it == donees.end()) {
continue;
}
auto& donee_vector = donee_it->second;
absl::c_stable_sort(donor_vector,
[](const DonorEntry& a, const DonorEntry& b) -> bool {
return a.shape_size > b.shape_size;
});
absl::c_stable_sort(donee_vector,
[](const DoneeEntry& a, const DoneeEntry& b) -> bool {
return a.shape_size > b.shape_size;
});
int64_t donor_vector_index = 0;
int64_t donee_vector_index = 0;
while (donor_vector_index < donor_vector.size() &&
donee_vector_index < donee_vector.size()) {
const auto& donor = donor_vector[donor_vector_index];
const auto& donee = donee_vector[donee_vector_index];
if (donor.shape_size > donee.shape_size) {
donor_vector_index += 1;
} else if (donor.shape_size < donee.shape_size) {
donee_vector_index += 1;
} else {
TF_RETURN_IF_ERROR(alias_config->SetUpAlias(
donee.index, donor.param_number, donor.index));
TF_RETURN_IF_ERROR(buffer_donor_config->RemoveBufferDonor(
donor.param_number, donor.index));
donor_vector_index += 1;
donee_vector_index += 1;
changed = true;
}
}
}
return changed;
}
absl::StatusOr<bool> OptimizeInputOutputBufferAlias::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const auto& entry_computation_layout = module->entry_computation_layout();
std::vector<Shape> input_shapes;
for (int64_t i = 0; i < module->entry_computation()->num_parameters(); ++i) {
input_shapes.push_back(entry_computation_layout.parameter_shape(i));
}
const Shape& output_shape = entry_computation_layout.result_shape();
HloInputOutputAliasConfig* alias_config =
&module->input_output_alias_config();
HloBufferDonorConfig* buffer_donor_config = &module->buffer_donor_config();
TF_ASSIGN_OR_RETURN(bool changed, Build(input_shapes, output_shape,
alias_config, buffer_donor_config));
TF_RETURN_IF_ERROR(alias_config->Verify(*module, shape_size_fn_));
return changed;
}
} | #include "xla/service/optimize_input_output_buffer_alias.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/test.h"
namespace xla {
class OptimizeInputOutputBufferAliasTest : public HloTestBase {
protected:
OptimizeInputOutputBufferAliasTest() {
r1f32_ = ShapeUtil::MakeShape(F32, {4});
r2f32_ = ShapeUtil::MakeShape(F32, {4, 5});
r3f32_ = ShapeUtil::MakeShape(F32, {4, 5, 6});
r4f32_ = ShapeUtil::MakeShape(F32, {4, 5, 6, 7});
d1f32_ = ShapeUtil::MakeShape(F32, {256}, {true});
d2f32_ = ShapeUtil::MakeShape(F32, {128, 128},
{false, true});
d3f32_ = ShapeUtil::MakeShape(F32, {512});
}
void CreatePassAndBufferDonorConfig(
bool registered_donor_buffer_only = false) {
optimize_pass_ = std::make_unique<OptimizeInputOutputBufferAlias>(
registered_donor_buffer_only);
buffer_donor_config_ = HloBufferDonorConfig();
}
int64_t AliasCount() {
int64_t count = 0;
alias_config_.ForEachAlias(
[&](const ShapeIndex&, const HloInputOutputAliasConfig::Alias&) {
count++;
});
return count;
}
bool BuildAliasConfig(const std::vector<Shape>& input_shapes,
const Shape& output_shape) {
alias_config_ = HloInputOutputAliasConfig(output_shape);
auto changed = optimize_pass_->Build(input_shapes, output_shape,
&alias_config_, &buffer_donor_config_);
TF_CHECK_OK(changed.status());
return changed.value();
}
std::unique_ptr<OptimizeInputOutputBufferAlias> optimize_pass_;
HloInputOutputAliasConfig alias_config_;
HloBufferDonorConfig buffer_donor_config_;
Shape r1f32_;
Shape r2f32_;
Shape r3f32_;
Shape r4f32_;
Shape d1f32_;
Shape d2f32_;
Shape d3f32_;
};
TEST_F(OptimizeInputOutputBufferAliasTest, AllDifferentBufferSizes) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({r1f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, OrderedNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{0});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {3}), ShapeIndex{3});
}
TEST_F(OptimizeInputOutputBufferAliasTest, PartialReuseNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r1f32_, r2f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 2);
EXPECT_TRUE(alias_config_.OutputHasAlias(ShapeIndex{0}));
EXPECT_TRUE(alias_config_.OutputHasAlias(ShapeIndex{1}));
EXPECT_FALSE(alias_config_.OutputHasAlias(ShapeIndex{2}));
EXPECT_FALSE(alias_config_.OutputHasAlias(ShapeIndex{3}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, UnorderedNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r4f32_, r3f32_, r2f32_, r1f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{3});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {3}), ShapeIndex{0});
}
TEST_F(OptimizeInputOutputBufferAliasTest, UnorderedNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({r1f32_}), r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape(
{r1f32_, ShapeUtil::MakeTupleShape({r3f32_, r2f32_}), r2f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 3);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0, 0}), ShapeIndex{0});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex({1, 1}));
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex({1, 0}));
EXPECT_FALSE(alias_config_.ParameterHasAlias(0, {0, 3}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, MultipleParameters) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {{r1f32_, r2f32_, r3f32_, r4f32_}};
Shape output = ShapeUtil::MakeTupleShape({r4f32_, r3f32_, r2f32_, r1f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {}), ShapeIndex{3});
EXPECT_EQ(alias_config_.GetAliasedOutput(1, {}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(2, {}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(3, {}), ShapeIndex{0});
}
TEST_F(OptimizeInputOutputBufferAliasTest, BufferDonorOnly) {
CreatePassAndBufferDonorConfig(true);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({r1f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r2f32_, r1f32_});
TF_CHECK_OK(buffer_donor_config_.AddBufferDonor(0, {0}));
EXPECT_TRUE(buffer_donor_config_.ParameterIsBufferDonor(0, {0}));
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 1);
EXPECT_FALSE(buffer_donor_config_.ParameterIsBufferDonor(0, {0}));
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{1});
EXPECT_FALSE(alias_config_.GetAliasedOutput(0, {1}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeWithTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({d1f32_, d2f32_})};
Shape output = ShapeUtil::MakeTupleShape({d1f32_, d2f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeNoTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d1f32_, d2f32_};
Shape output = d1f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeBufferOutput) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d1f32_, d2f32_};
Shape output = d3f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeBufferInput) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d3f32_};
Shape output = d1f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, AllDifferentMemorySpaces) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
for (int i = 0; i < output.tuple_shapes_size(); ++i) {
output.mutable_tuple_shapes(i)->mutable_layout()->set_memory_space(
Layout::kHostMemorySpace);
}
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
} |
1,846 | cpp | tensorflow/tensorflow | buffer_assignment | third_party/xla/xla/service/buffer_assignment.cc | third_party/xla/xla/service/buffer_assignment_test.cc | #ifndef XLA_SERVICE_BUFFER_ASSIGNMENT_H_
#define XLA_SERVICE_BUFFER_ASSIGNMENT_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iosfwd>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_assignment.pb.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
absl::Status GatherComputationsByAllocationType(
const HloModule* module,
std::vector<const HloComputation*>* thread_local_computations,
std::vector<const HloComputation*>* global_computations);
class BufferAllocation {
public:
using Index = int64_t;
BufferAllocation(Index index, int64_t size, LogicalBuffer::Color color)
: index_(index), size_(size), color_(color) {}
Index index() const { return index_; }
bool is_thread_local() const { return is_thread_local_; }
void set_is_thread_local(bool is_thread_local) {
is_thread_local_ = is_thread_local;
}
bool is_reusable() const {
return !is_thread_local() && !is_tuple();
}
bool is_readonly() const {
return (is_entry_computation_parameter() &&
!is_parameter_aliased_with_output_) ||
is_constant();
}
bool is_tuple() const { return is_tuple_; }
void set_is_tuple(bool is_tuple) { is_tuple_ = is_tuple; }
bool is_entry_computation_parameter() const {
return is_entry_computation_parameter_;
}
bool is_parameter_aliased_with_output() const {
return is_parameter_aliased_with_output_;
}
bool is_constant() const { return is_constant_; }
int64_t parameter_number() const {
CHECK(is_entry_computation_parameter_);
return parameter_number_;
}
const ShapeIndex& param_shape_index() const {
CHECK(is_entry_computation_parameter_);
return param_shape_index_;
}
bool maybe_live_out() const { return maybe_live_out_; }
void set_maybe_live_out(bool value) { maybe_live_out_ = value; }
int64_t size() const { return size_; }
LogicalBuffer::Color color() const { return color_; }
void set_color(LogicalBuffer::Color color) { color_ = color; }
struct OffsetSize {
int64_t offset = 0;
int64_t size = 0;
};
const absl::flat_hash_map<const HloValue*, OffsetSize>& assigned_buffers()
const {
return assigned_buffers_;
}
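  // A Slice names a contiguous [offset, offset + size) byte range within one
  // BufferAllocation. Slices compare and hash by (allocation index, offset,
  // size), and OverlapsWith() reports whether two slices of the same
  // allocation share any bytes.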
class Slice {
public:
Slice() {}
Slice(const BufferAllocation* allocation, int64_t offset, int64_t size)
: allocation_(allocation), offset_(offset), size_(size) {}
const BufferAllocation* allocation() const { return allocation_; }
Index index() const { return allocation_->index(); }
int64_t offset() const { return offset_; }
int64_t size() const { return size_; }
bool operator==(const Slice& other) const {
return index() == other.index() && offset_ == other.offset_ &&
size_ == other.size_;
}
bool operator!=(const Slice& other) const { return !(*this == other); }
bool operator<(const Slice& other) const {
if (index() != other.index()) return index() < other.index();
if (offset_ != other.offset_) return offset_ < other.offset_;
return size_ < other.size_;
}
bool OverlapsWith(const Slice& other) const {
const int64_t end = offset_ + size_;
const int64_t other_end = other.offset_ + other.size_;
return index() == other.index() && offset_ < other_end &&
end > other.offset_;
}
template <typename H>
friend H AbslHashValue(H h, const Slice& s) {
return H::combine(std::move(h), s.index(), s.offset(), s.size());
}
std::string ToString() const;
private:
const BufferAllocation* allocation_ = nullptr;
int64_t offset_ = 0;
int64_t size_ = 0;
};
Slice GetSlice(const HloValue& buffer) const;
std::string ToString() const;
std::string ToShortString() const;
BufferAllocationProto ToProto() const;
bool IsInputOrOutput() const {
return is_entry_computation_parameter() || maybe_live_out();
}
bool IsPreallocatedTempBuffer() const {
return !is_entry_computation_parameter() &&
!maybe_live_out() &&
!is_thread_local() &&
!is_constant();
}
void AddHeapTrace(const HeapSimulatorTrace& heap_trace) {
heap_traces_.push_back(heap_trace);
heap_traces_.back().set_buffer_allocation_index(index());
}
std::vector<HeapSimulatorTrace> HeapTraces() const { return heap_traces_; }
const std::vector<const HloValue*>& PeakMemoryLogicalBuffers() const {
return peak_buffers_;
}
int64_t fragmentation_bytes() const { return fragmentation_bytes_; }
bool operator==(const BufferAllocation& other) const {
return index_ == other.index_;
}
bool operator!=(const BufferAllocation& other) const {
return !(*this == other);
}
bool operator<(const BufferAllocation& other) const {
return index() < other.index();
}
void set_entry_computation_parameter(int64_t parameter_number,
ShapeIndex param_shape_index,
bool parameter_aliased_with_output) {
is_entry_computation_parameter_ = true;
is_parameter_aliased_with_output_ = parameter_aliased_with_output;
parameter_number_ = parameter_number;
param_shape_index_ = std::move(param_shape_index);
}
void set_constant(bool is_constant) { is_constant_ = is_constant; }
private:
friend class BufferAssigner;
friend class BufferAssignment;
void AddAssignment(const HloValue& buffer, int64_t offset, int64_t size);
void set_index(Index index) { index_ = index; }
void set_size(int64_t size) { size_ = size; }
Index index_;
int64_t size_;
bool is_thread_local_ = false;
bool is_tuple_ = false;
LogicalBuffer::Color color_;
bool is_entry_computation_parameter_ = false;
bool is_parameter_aliased_with_output_ = false;
int64_t parameter_number_ = 0;
ShapeIndex param_shape_index_;
bool maybe_live_out_ = false;
bool is_constant_ = false;
absl::flat_hash_map<const HloValue*, OffsetSize> assigned_buffers_;
int64_t fragmentation_bytes_ = 0;
std::vector<HeapSimulatorTrace> heap_traces_;
std::vector<const HloValue*> peak_buffers_;
};
std::ostream& operator<<(std::ostream& out, const BufferAllocation& s);
std::ostream& operator<<(std::ostream& out, const BufferAllocation::Slice& s);
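// The result of buffer assignment: the set of BufferAllocations created for a
// module together with the mapping from HloValues/HloBuffers (as grouped by
// alias analysis) to the allocation and offset they received, plus query
// helpers keyed by instruction and shape index.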
class BufferAssignment {
public:
struct BufferIsolationOptions {
std::function<bool(const HloValue*, const HloValue*)> hlo_value_compare;
buffer_assignment::BufferIsolationConfig config;
};
const std::vector<BufferAllocation>& Allocations() const {
return allocations_;
}
int64_t temp_allocation_total_size() const {
return temp_allocation_total_size_;
}
uint64_t multiheap_size_constraint_per_heap() const {
return multiheap_size_constraint_per_heap_;
}
bool HasAllocation(const HloValue& value) const;
bool HasAllocation(HloValue::Id value_id) const;
bool HasAllocation(const HloBuffer& buffer) const;
const BufferAllocation& GetAssignedAllocation(const HloValue& value) const;
const BufferAllocation& GetAssignedAllocation(
const HloBuffer& hlo_buffer) const;
const BufferAllocation& GetAllocation(BufferAllocation::Index index) const;
const BufferAllocation* GetInstructionAllocation(
const HloInstruction* hlo, const ShapeIndex& shape_index) const;
std::set<BufferAllocation::Slice> GetAllSlices(
const HloInstruction* instruction, const ShapeIndex& index) const;
bool HasAllocationAt(const HloInstruction* instruction,
const ShapeIndex& index) const;
bool HasTopLevelAllocation(const HloInstruction* instruction) const;
absl::StatusOr<BufferAllocation::Slice> GetUniqueSlice(
const HloInstruction* instruction, const ShapeIndex& index) const;
absl::StatusOr<BufferAllocation::Slice> GetUniqueTopLevelSlice(
const HloInstruction* instruction) const;
absl::StatusOr<BufferAllocation::Slice> GetUniqueTopLevelOutputSlice() const;
const std::vector<const HloValue*>& GetSourceBuffers(
const HloInstruction* instruction, const ShapeIndex& index) const {
return dataflow_analysis().GetValueSet(instruction, index).values();
}
bool SharesSliceAtIndex(const HloInstruction* hlo_a,
const ShapeIndex& shape_index_a,
const HloInstruction* hlo_b,
const ShapeIndex& shape_index_b) const;
bool SharesTopLevelSlice(const HloInstruction* hlo_a,
const HloInstruction* hlo_b) const {
return SharesSliceAtIndex(hlo_a, {}, hlo_b, {});
}
bool HaveDisjointSlices(const HloInstruction* hlo_a,
const HloInstruction* hlo_b) const;
const HloDataflowAnalysis& dataflow_analysis() const {
return alias_analysis_->dataflow_analysis();
}
HloAliasAnalysis& alias_analysis() const { return *alias_analysis_; }
const HloOrdering& hlo_ordering() const { return *hlo_ordering_; }
const HloLiveRange& hlo_live_range() const { return *hlo_live_range_; }
std::string ToString() const;
std::string ToVerboseString(size_t max_buffers_to_show) const;
std::string BufferInfoString() const;
BufferAssignmentProto ToProto() const;
static absl::StatusOr<std::unique_ptr<BufferAssignment>> FromProto(
const BufferAssignmentProto& proto, const HloModule* module,
BufferValue::SizeFunction buffer_size,
HloDataflowAnalysis::CanShareBuffer can_share_buffer);
struct Stats {
int64_t parameter_allocation_count = 0;
int64_t parameter_allocation_bytes = 0;
int64_t constant_allocation_count = 0;
int64_t constant_allocation_bytes = 0;
int64_t maybe_live_out_allocation_count = 0;
int64_t maybe_live_out_allocation_bytes = 0;
int64_t preallocated_temp_allocation_count = 0;
int64_t preallocated_temp_allocation_bytes = 0;
int64_t preallocated_temp_fragmentation_bytes = -1;
int64_t total_allocation_count = 0;
int64_t total_allocation_bytes = 0;
int64_t total_fragmentation_bytes = -1;
std::string ToString() const;
};
const Stats& GetStats() const { return stats_; }
private:
friend class BufferAssigner;
BufferAssignment(const HloModule* module,
std::unique_ptr<HloOrdering> hlo_ordering,
BufferValue::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
std::unique_ptr<HloAliasAnalysis> alias_analysis,
std::unique_ptr<HloLiveRange> hlo_live_range)
: module_(module),
hlo_ordering_(std::move(hlo_ordering)),
buffer_size_(std::move(buffer_size)),
color_alignment_(std::move(color_alignment)),
alias_analysis_(std::move(alias_analysis)),
hlo_live_range_(std::move(hlo_live_range)) {
int32_t raw_value = module->config()
.debug_options()
.xla_multiheap_size_constraint_per_heap();
multiheap_size_constraint_per_heap_ =
(raw_value == -1) ? UINT64_MAX : raw_value;
}
BufferAllocation* NewEmptyAllocation(int64_t size,
LogicalBuffer::Color color);
BufferAllocation* NewAllocation(const HloBuffer& buffer, int64_t size);
void AddAssignment(BufferAllocation* allocation, const HloBuffer& buffer,
int64_t offset, int64_t size);
void AddAssignment(BufferAllocation* allocation, const HloValue& value,
int64_t offset, int64_t size);
const HloModule& module() const { return *module_; }
BufferAllocation* GetMutableAssignedAllocation(const HloBuffer& buffer);
BufferAllocation* GetMutableAllocation(BufferAllocation::Index index);
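  // Returns the size used for `buffer`, defined as the maximum of buffer_size_
  // over the HloValues it contains; results are memoized in
  // cached_buffer_sizes_.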
int64_t HloBufferSize(const HloBuffer& buffer) {
auto iter = cached_buffer_sizes_.find(buffer.id());
if (iter != cached_buffer_sizes_.end()) return iter->second;
int64_t result = 0;
for (const HloValue* value : buffer.values()) {
result = std::max(result, buffer_size_(*value));
}
cached_buffer_sizes_.insert({buffer.id(), result});
return result;
}
void CombineTempAllocations(
const absl::flat_hash_set<BufferValue::Color>& private_stack_colors,
std::optional<BufferValue::Color> temp_buffer_color);
absl::Status ComputeSummaryStats();
std::vector<BufferAllocation> allocations_;
int64_t temp_allocation_total_size_ = 0;
uint64_t multiheap_size_constraint_per_heap_;
absl::flat_hash_map<const HloValue*, BufferAllocation::Index>
allocation_index_for_value_;
const HloModule* module_;
const std::unique_ptr<HloOrdering> hlo_ordering_;
BufferValue::SizeFunction buffer_size_;
LogicalBuffer::AlignmentFunction color_alignment_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
std::unique_ptr<HloLiveRange> hlo_live_range_;
Stats stats_;
absl::flat_hash_map<HloBuffer::Id, int64_t> cached_buffer_sizes_;
BufferAssignment(const BufferAssignment&) = delete;
BufferAssignment& operator=(const BufferAssignment&) = delete;
};
class BufferAssigner {
public:
using Colorer =
std::function<absl::Status(HloAliasAnalysis*, const HloOrdering&)>;
using MustNotLiveOut = std::function<bool(
const HloAliasAnalysis&, const HloInstruction*, const ShapeIndex&)>;
using PrivateStacks = absl::flat_hash_map<BufferValue::Color,
std::vector<const HloComputation*>>;
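  // The default colorer assigns each HloValue the memory space recorded in the
  // layout of its defining position, or color 0 when that position has no
  // layout, so that values in different memory spaces can later be kept in
  // separate allocations.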
static Colorer DefaultColorer() {
return [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
for (HloValue* value : alias_analysis->dataflow_analysis().values()) {
const HloPosition& defining_position = value->defining_position();
if (defining_position.shape().has_layout()) {
value->set_color(BufferValue::Color(
defining_position.shape().layout().memory_space()));
} else {
value->set_color(BufferValue::Color(0));
}
}
return absl::OkStatus();
};
}
static absl::StatusOr<std::unique_ptr<BufferAssignment>> Run(
const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
BufferValue::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
bool allocate_buffers_for_constants = false,
Colorer colorer = DefaultColorer(),
std::optional<MustNotLiveOut> must_not_live_out = std::nullopt,
HloDataflowAnalysis::CanShareBuffer can_share_buffer = nullptr,
std::unique_ptr<memory_space_assignment::PresetAssignments>
preset_assignments = {},
const PrivateStacks& private_stacks = {},
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare = nullptr,
std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options = std::nullopt,
std::optional<BufferValue::Color> temp_buffer_color = std::nullopt);
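  // A minimal sketch (illustrative, not part of the original header) of a
  // typical invocation, mirroring the helpers in the companion test file; the
  // buffer-size callback is a placeholder:
  //
  //   auto assignment =
  //       BufferAssigner::Run(module,
  //                           std::make_unique<DependencyHloOrdering>(module),
  //                           buffer_size_bytes_fn,
  //                           [](LogicalBuffer::Color) { return 1; },
  //                           /*allocate_buffers_for_constants=*/true)
  //           .value();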
private:
BufferAssigner(bool allocate_buffers_for_constants, Colorer colorer,
std::optional<MustNotLiveOut> must_not_live_out,
std::unique_ptr<memory_space_assignment::PresetAssignments>
preset_assignments)
: allocate_buffers_for_constants_(allocate_buffers_for_constants),
colorer_(colorer),
must_not_live_out_(must_not_live_out),
preset_assignments_(std::move(preset_assignments)) {}
virtual ~BufferAssigner() = default;
absl::StatusOr<std::unique_ptr<BufferAssignment>> CreateAssignment(
const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
BufferValue::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
HloDataflowAnalysis::CanShareBuffer can_share_buffer,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
std::optional<BufferValue::Color> temp_buffer_color);
absl::Status AssignBuffersForComputations(
const std::vector<const HloComputation*>& computations,
bool is_thread_local,
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>*
buffers_to_assign_sequentially,
BufferAssignment* assignment);
bool LiveRangeInterferes(const HloValue* buffer1, const HloValue* buffer2,
BufferAssignment* assignment);
absl::Status AssignPresetBuffers(
absl::flat_hash_set<const HloBuffer*>* assigned_buffers,
BufferAssignment* assignment);
absl::Status AssignSingleHloBuffer(
const HloBuffer* hlo_buffer, bool is_thread_local,
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>*
buffers_to_assign_sequentially,
std::vector<BufferAllocation::Index>* allocation_indices,
BufferAssignment* assignment);
absl::Status AssignBuffersWithSequentialOrdering(
const absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>&
buffers_to_assign_sequentially,
bool run_whole_module_heap_simulation, BufferAssignment* assignment,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options);
void IsolateHeapBuffers(
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
const BufferAssignment* assignment, LogicalBuffer::Color color,
HeapSimulator::Result<HloValue>& result) const;
void AssignBuffersFromHeapSimulator(
HeapSimulator::Result<HloValue>& result, BufferAssignment* assignment,
LogicalBuffer::Color color,
std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options);
bool MaybeAssignBuffer(BufferAllocation* allocation, const HloBuffer& buffer,
BufferAssignment* assignment);
absl::flat_hash_map<LogicalBuffer::Color,
absl::flat_hash_set<const HloValue*>>
SplitBuffersByColor(
const absl::flat_hash_set<const HloValue*>& buffers) const; | #include "xla/service/buffer_assignment.h"
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/copy_insertion.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::PresetAssignments;
using ::testing::UnorderedElementsAre;
class InstructionListVisitor : public DfsHloVisitorWithDefault {
public:
explicit InstructionListVisitor(const HloInstruction* root) : root_(root) {}
absl::Status DefaultAction(HloInstruction* hlo) override {
instructions_.push_back(hlo);
VLOG(0) << "List instruction " << hlo->ToString();
return absl::OkStatus();
}
std::vector<const HloInstruction*> GetInstructions() { return instructions_; }
private:
const HloInstruction* root_;
std::vector<const HloInstruction*> instructions_;
InstructionListVisitor(const InstructionListVisitor&) = delete;
InstructionListVisitor& operator=(const InstructionListVisitor&) = delete;
};
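// Returns every instruction reachable from `root`, in the order the
// InstructionListVisitor above encounters them during the DFS accept.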
const std::vector<const HloInstruction*> GetInstructions(HloInstruction* root) {
InstructionListVisitor main_list(root);
TF_CHECK_OK(root->Accept(&main_list));
return main_list.GetInstructions();
}
class BufferAssignmentTest : public HloTestBase {
protected:
~BufferAssignmentTest() override {}
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true)
.value();
}
absl::StatusOr<std::unique_ptr<BufferAssignment>> ConvertToProtoAndBack(
const BufferAssignment* buffers, const HloModule* module) {
auto proto = buffers->ToProto();
return BufferAssignment::FromProto(
proto, module, backend().compiler()->BufferSizeBytesFunction(),
nullptr);
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithSequentialOrdering(
HloModule* module, int64_t alignment = 1,
BufferAssigner::Colorer colorer = BufferAssigner::DefaultColorer(),
const BufferAssigner::PrivateStacks& private_stacks = {},
std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options = std::nullopt) {
return BufferAssigner::Run(
module,
std::make_unique<SequentialHloOrdering>(module->schedule()),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true, colorer,
std::nullopt, nullptr,
{}, private_stacks,
nullptr, isolation_options)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersForConstants(
HloModule* module, int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
false)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersReuseForAdd(
HloModule* module, int64_t alignment = 1) {
auto must_not_live_out = [](const HloAliasAnalysis& alias_analysis,
const HloInstruction* instruction,
const ShapeIndex&) {
return instruction->opcode() == HloOpcode::kAdd;
};
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
false,
BufferAssigner::DefaultColorer(),
must_not_live_out)
.value();
}
std::unique_ptr<BufferAssignment> RunColoredBufferAssignment(
HloModule* module, BufferAssigner::Colorer colorer,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true, std::move(colorer))
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithInstructionSequence(
HloModule* module, absl::Span<HloInstruction* const> instruction_sequence,
int64_t alignment = 1) {
HloSchedule schedule(module);
schedule.set_sequence(module->entry_computation(), instruction_sequence);
return BufferAssigner::Run(
module, std::make_unique<SequentialHloOrdering>(schedule),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithPresetAssignments(
HloModule* module, std::unique_ptr<PresetAssignments> preset_assignments,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true,
BufferAssigner::DefaultColorer(),
std::nullopt,
nullptr, std::move(preset_assignments))
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithIsolationOptions(
HloModule* module, std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options = std::nullopt) {
return BufferAssigner::Run(
module,
std::make_unique<SequentialHloOrdering>(module->schedule()),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; },
true,
BufferAssigner::DefaultColorer(),
std::nullopt, nullptr,
{}, {},
nullptr, isolation_options)
.value();
}
std::unique_ptr<HloComputation> BuildMapComputationPlus1(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto value = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, value));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildReduceComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto param2 =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "y"));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, param2));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileConditionComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto const4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto index = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const4->shape(), param, 0));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), index,
const4, ComparisonDirection::kLt));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileBodyComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto constv = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto indexc = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const1->shape(), param, 0));
auto addc = builder.AddInstruction(HloInstruction::CreateBinary(
indexc->shape(), HloOpcode::kAdd, indexc, const1));
auto indexv = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(constv->shape(), param, 1));
auto addv = builder.AddInstruction(HloInstruction::CreateBinary(
constv->shape(), HloOpcode::kAdd, indexv, constv));
builder.AddInstruction(HloInstruction::CreateTuple({addc, addv}));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildR0F32UnaryOpComputation(
HloOpcode opcode, const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
builder.AddInstruction(HloInstruction::CreateUnary(r0f32_, opcode, param));
return builder.Build();
}
const BufferAllocation& GetAssignedInputAllocation(
const BufferAssignment& buffers, HloInstruction* hlo) {
LOG(INFO) << "Checking input: " << hlo->ToString();
const BufferAllocation& buffer =
*buffers.GetUniqueTopLevelSlice(hlo).value().allocation();
EXPECT_EQ(hlo->parameter_number(), buffer.parameter_number());
return buffer;
}
const BufferAllocation& GetAssignedOutputAllocation(
const BufferAssignment& buffers, HloInstruction* hlo) {
LOG(INFO) << "Checking output: " << hlo->ToString();
const BufferAllocation& buffer = GetTopLevelAllocation(buffers, hlo);
return buffer;
}
const BufferAllocation& GetAllocation(const BufferAssignment& buffers,
const HloInstruction* hlo,
const ShapeIndex& index) {
return *buffers.GetUniqueSlice(hlo, index).value().allocation();
}
const BufferAllocation& GetTopLevelAllocation(const BufferAssignment& buffers,
const HloInstruction* hlo) {
return *buffers.GetUniqueTopLevelSlice(hlo).value().allocation();
}
int64_t ValidateBuffers(
const std::vector<const HloInstruction*>& instructions,
const BufferAssignment& buffers) {
for (const HloInstruction* hlo : instructions) {
if (!buffers.HasTopLevelAllocation(hlo)) {
EXPECT_TRUE(HloOpcode::kConstant == hlo->opcode() ||
HloOpcode::kParameter == hlo->opcode());
continue;
}
}
int64_t total_size = 0;
for (auto& allocation : buffers.Allocations()) {
total_size += allocation.size();
}
return total_size;
}
Shape s32_ = ShapeUtil::MakeShape(xla::S32, {});
Shape r0f32_ = ShapeUtil::MakeShape(xla::F32, {});
Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
Shape f32vec10_ = ShapeUtil::MakeShape(F32, {10});
Shape f32vec100_ = ShapeUtil::MakeShape(F32, {100});
Shape f32a100x10_ = ShapeUtil::MakeShape(F32, {100, 10});
Shape t_s32_f32v4_ = ShapeUtil::MakeTupleShape({s32_, f32vec4_});
Shape t_s32_f32v10_ = ShapeUtil::MakeTupleShape({s32_, f32vec10_});
};
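// Returns true when no top-level allocation slice assigned to an instruction
// in `a` is also assigned to an instruction in `b`, i.e. the two instruction
// sets received disjoint buffers.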
static bool BuffersDistinct(const std::vector<const HloInstruction*>& a,
const std::vector<const HloInstruction*>& b,
const BufferAssignment& assignment) {
absl::flat_hash_set<BufferAllocation::Slice> a_slices;
for (const HloInstruction* instruction : a) {
if (assignment.HasTopLevelAllocation(instruction)) {
a_slices.insert(assignment.GetUniqueTopLevelSlice(instruction).value());
}
}
for (const HloInstruction* instruction : b) {
if (assignment.HasTopLevelAllocation(instruction)) {
if (a_slices.contains(
assignment.GetUniqueTopLevelSlice(instruction).value())) {
return false;
}
}
}
return true;
}
TEST_F(BufferAssignmentTest, ScalarConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
{
auto buffers = RunBufferAssignment(module.get());
EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
}
{
auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
}
}
TEST_F(BufferAssignmentTest, BufferForConst) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, const0, const1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
{
auto buffers = RunBufferAssignment(module.get());
EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
EXPECT_TRUE(buffers->HasTopLevelAllocation(const1));
GetAssignedOutputAllocation(*buffers, add);
}
{
auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
EXPECT_FALSE(buffers->HasTopLevelAllocation(const1));
GetAssignedOutputAllocation(*buffers, add);
}
}
TEST_F(BufferAssignmentTest, HasAllocationAt) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({negate, param0, constant}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
EXPECT_EQ(buffers->HasTopLevelAllocation(tuple),
buffers->HasAllocationAt(tuple, {}));
EXPECT_EQ(buffers->HasTopLevelAllocation(negate),
buffers->HasAllocationAt(tuple, {0}));
EXPECT_EQ(buffers->HasTopLevelAllocation(param0),
buffers->HasAllocationAt(tuple, {1}));
EXPECT_EQ(buffers->HasTopLevelAllocation(constant),
buffers->HasAllocationAt(tuple, {2}));
}
TEST_F(BufferAssignmentTest, BufferForOutputConst) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(const0->shape(), HloOpcode::kCopy, const0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
GetAssignedOutputAllocation(*buffers, copy);
}
TEST_F(BufferAssignmentTest, Basic) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_EQ(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
}
TEST_F(BufferAssignmentTest, BasicToFromProto) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers_from_proto,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
const HloDataflowAnalysis& dataflow_orig = buffers_orig->dataflow_analysis();
const HloDataflowAnalysis& dataflow_proto =
buffers_from_proto->dataflow_analysis();
EXPECT_EQ(buffers_orig->Allocations().size(),
buffers_from_proto->Allocations().size());
for (BufferValue::Id id = 0; id < dataflow_orig.values().size(); id++) {
auto& orig_value = dataflow_orig.values().at(id);
if (buffers_orig->HasAllocation(*orig_value)) {
auto& value_proto = dataflow_proto.GetUniqueValueAt(
orig_value->instruction(), orig_value->index());
EXPECT_TRUE(buffers_from_proto->HasAllocation(value_proto));
EXPECT_EQ(orig_value->color(), value_proto.color());
EXPECT_EQ(buffers_orig->GetAssignedAllocation(*orig_value).index(),
buffers_from_proto->GetAssignedAllocation(value_proto).index());
}
}
}
TEST_F(BufferAssignmentTest, AliasedParamCanBeReused) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "p0"));
auto neg_1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));
auto neg_2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, neg_1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({}, 0, {}));
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation param_buffer = GetAssignedInputAllocation(*buffers, param);
BufferAllocation neg_1_buffer = GetAllocation(*buffers, neg_1, {});
BufferAllocation neg_2_buffer = GetAllocation(*buffers, neg_2, {});
EXPECT_EQ(param_buffer.index(), neg_1_buffer.index());
EXPECT_EQ(neg_2_buffer.index(), neg_1_buffer.index());
}
TEST_F(BufferAssignmentTest, AddCannotReuse) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignmentNoBuffersReuseForAdd(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& sub_buffer = GetTopLevelAllocation(*buffers, sub);
EXPECT_NE(sub_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), sub_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
}
TEST_F(BufferAssignmentTest, BasicUniquelyColored) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
absl::flat_hash_map<const HloInstruction*, int> color_map;
auto colorer = [&](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
int color = 0;
for (HloValue::Id id = 0;
id < alias_analysis->dataflow_analysis().values().size(); id++) {
auto& value = alias_analysis->dataflow_analysis().GetValue(id);
color_map[value.defining_instruction()] = color;
value.set_color(BufferValue::Color(color++));
}
return absl::OkStatus();
};
auto buffers = RunColoredBufferAssignment(module.get(), colorer);
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub); |
1,847 | cpp | tensorflow/tensorflow | while_loop_trip_count_annotator | third_party/xla/xla/service/while_loop_trip_count_annotator.cc | third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_TRIP_COUNT_ANNOTATOR_H_
#define XLA_SERVICE_WHILE_LOOP_TRIP_COUNT_ANNOTATOR_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
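// Pass that inspects every while loop in a module and, when the loop's trip
// count can be computed statically, records it in the instruction's
// WhileLoopBackendConfig so later passes and backends can rely on it.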
class WhileLoopTripCountAnnotator : public HloModulePass {
public:
~WhileLoopTripCountAnnotator() override {}
absl::string_view name() const override {
return "while-loop-trip-count-annotator";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/while_loop_trip_count_annotator.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::StatusOr<bool> WhileLoopTripCountAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() != HloOpcode::kWhile) {
continue;
}
if (auto trip_count = ComputeWhileLoopTripCount(instr)) {
WhileLoopBackendConfig config;
config.mutable_known_trip_count()->set_n(*trip_count);
TF_RETURN_IF_ERROR(instr->set_backend_config(config));
changed = true;
}
}
}
return changed;
}
} | #include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TripCountAnnotatorTest : public HloTestBase {};
TEST_F(TripCountAnnotatorTest, KnownSmallTripCount) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(0)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(10, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, KnownLargeTripCount) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(0)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(1000000, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, NonzeroStart) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(10)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(999990, config.known_trip_count().n());
}
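// Sanity check for the expectation above: the induction variable starts at 10
// and the loop runs while i < 1000000, so the body executes
// 1000000 - 10 = 999990 times.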
TEST_F(TripCountAnnotatorTest, LessThanOrEqualTo) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LE
}
ENTRY test {
i_start = s32[] constant(10)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(999991, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, Int64Overflow) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s64[]) parameter(0)
i = s64[] get-tuple-element(param), index=0
one = s64[] constant(1)
i_plus_one = s64[] add(i, one)
ROOT tuple = (s64[]) tuple(i_plus_one)
}
Cond {
param = (s64[]) parameter(0)
i = s64[] get-tuple-element(param), index=0
trip_count = s64[] constant(9223372036854775807)
ROOT done = pred[] compare(i, trip_count), direction=LE
}
ENTRY test {
i_start = s64[] constant(-9223372036854775808)
initial_tuple = (s64[]) tuple(i_start)
ROOT while = (s64[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
EXPECT_FALSE(changed);
}
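// In the test above the exact trip count would be INT64_MAX - INT64_MIN + 1 =
// 2^64, which does not fit in an int64_t, so no known_trip_count is attached
// and the pass reports that nothing changed.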
}
} |
1,848 | cpp | tensorflow/tensorflow | hlo_alias_analysis | third_party/xla/xla/service/hlo_alias_analysis.cc | third_party/xla/xla/service/hlo_alias_analysis_test.cc | #ifndef XLA_SERVICE_HLO_ALIAS_ANALYSIS_H_
#define XLA_SERVICE_HLO_ALIAS_ANALYSIS_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_ordering.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
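// Analysis that groups HloValues which must share storage into HloBuffers. It
// wraps an HloDataflowAnalysis and answers queries from a value, or from an
// (instruction, shape index) position, to the buffer(s) containing it, and
// reports which buffers are live out of the entry computation.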
class HloAliasAnalysis {
public:
static absl::StatusOr<std::unique_ptr<HloAliasAnalysis>> Run(
const HloModule* module,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr);
std::string ToString() const;
const HloBuffer& GetBufferContainingValue(const HloValue& value) const {
return *value_to_buffer_.at(&value);
}
HloBuffer& GetBufferContainingValue(const HloValue& value) {
return *value_to_buffer_.at(&value);
}
const HloBuffer& GetBuffer(HloBuffer::Id buffer_id) const {
return buffers_.at(buffer_id);
}
HloBuffer& GetBuffer(HloBuffer::Id buffer_id) {
return buffers_.at(buffer_id);
}
const HloBuffer& GetUniqueBufferAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const;
HloBuffer& GetUniqueBufferAt(const HloInstruction* instruction,
const ShapeIndex& index = {});
std::vector<const HloBuffer*> ComputeBuffersAt(
const HloInstruction* instruction, const ShapeIndex& index = {}) const;
const std::vector<HloBuffer>& buffers() const { return buffers_; }
HloDataflowAnalysis& dataflow_analysis() const { return *dataflow_analysis_; }
bool BufferLivesOut(const HloBuffer& buffer) const {
return live_out_buffers_.contains(&buffer);
}
bool ValueLivesOut(const HloValue& value) const {
return live_out_buffers_.contains(&GetBufferContainingValue(value));
}
std::vector<const HloBuffer*> LiveOutBuffers() const {
std::vector<const HloBuffer*> results(live_out_buffers_.begin(),
live_out_buffers_.end());
absl::c_sort(results, HloBuffer::IdLessThan);
return results;
}
protected:
explicit HloAliasAnalysis(const HloModule* module);
absl::Status Verify() const;
const HloModule* module_;
absl::flat_hash_set<const HloBuffer*> live_out_buffers_;
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis_;
absl::flat_hash_map<const HloValue*, HloBuffer*> value_to_buffer_;
std::vector<HloBuffer> buffers_;
};
}
#endif
#include "xla/service/hlo_alias_analysis.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/map_util.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
using absl::StrAppend;
namespace {
using FlatValueSet = absl::flat_hash_set<const HloValue*>;
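// If `value` appears at the entry root at a position that the module's
// input/output alias config maps to a parameter, add that parameter's value
// to `aliased_values`.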
void ComputeInputOutputAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
const HloModule& module = dataflow.module();
const HloComputation& entry_computation = *module.entry_computation();
const HloInputOutputAliasConfig& io_alias_config =
module.input_output_alias_config();
for (const HloPosition& pos : value.positions()) {
if (pos.instruction == entry_computation.root_instruction()) {
std::optional<HloInputOutputAliasConfig::Alias> aliased_input =
io_alias_config.GetAliasedParameter(pos.index);
if (aliased_input) {
aliased_values.insert(
&dataflow.GetUniqueValueAt(entry_computation.parameter_instruction(
aliased_input->parameter_number),
aliased_input->parameter_index));
}
}
}
}
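// A while loop's init operand, body/condition parameter, body root, and the
// while result must all share a buffer; add the corresponding while values
// for each of these roles that `value` plays.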
void ComputeWhileAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
VLOG(3) << "Compute kWhile aliases";
for (const HloUse& use : value.GetUses()) {
if (use.instruction->opcode() == HloOpcode::kWhile) {
const HloValue& while_value =
dataflow.GetUniqueValueAt(use.instruction, use.operand_index);
aliased_values.insert(&while_value);
VLOG(3) << " value is init value to a while; must share buffer with "
"while value "
<< while_value;
}
}
if (value.defining_instruction()->opcode() == HloOpcode::kParameter) {
const HloComputation* computation = value.defining_instruction()->parent();
const CallGraphNode& call_graph_node =
dataflow.call_graph().GetNode(computation);
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
CHECK_EQ(call_graph_node.caller_callsites().size(), 1);
const HloValue& while_value = dataflow.GetUniqueValueAt(
callsite.instruction(), value.defining_index());
VLOG(3) << " value is parameter value of the body or condition of a "
"while; must share buffer with while value "
<< while_value;
aliased_values.insert(&while_value);
}
}
}
for (const HloPosition& position : value.positions()) {
if (!position.instruction->IsRoot()) continue;
const HloComputation* computation = position.instruction->parent();
const CallGraphNode& call_graph_node =
dataflow.call_graph().GetNode(computation);
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kWhile &&
callsite.instruction()->while_body() == computation) {
CHECK_EQ(call_graph_node.caller_callsites().size(), 1)
<< "Call graph must have been flattened.";
const HloValue& while_value =
dataflow.GetUniqueValueAt(callsite.instruction(), position.index);
VLOG(3) << " value @ " << position << " is root of "
<< callsite.instruction()->name()
<< "; body root and while value root must share buffer "
"among them: "
<< while_value;
aliased_values.insert(&while_value);
}
}
}
}
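// A conditional's result shares a buffer with the root of every branch
// computation; if `value` is such a root, add the conditional's value.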
void ComputeConditionalAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
VLOG(3) << "Compute kConditional aliases";
for (const HloPosition& position : value.positions()) {
if (!position.instruction->IsRoot()) continue;
const HloComputation* computation = position.instruction->parent();
const CallGraphNode& call_graph_node =
dataflow.call_graph().GetNode(computation);
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kConditional) {
CHECK_EQ(call_graph_node.caller_callsites().size(), 1);
const HloValue& cond_value =
dataflow.GetUniqueValueAt(callsite.instruction(), position.index);
VLOG(3) << " value @ " << position << " is root of "
<< callsite.instruction()->name()
<< "; branch computation roots must share buffer among them : "
<< cond_value;
aliased_values.insert(&cond_value);
}
}
}
}
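// For in-place operations the operand value and the corresponding output
// position alias; add the matching value whether `value` sits on the output
// side or the operand (use) side of such a pair.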
void ComputeInPlaceOperationAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
VLOG(3) << "Compute aliases for in-place operations (e.g. "
"kDynamicUpdateSlice and kScatter)";
for (const HloPosition& position : value.positions()) {
HloInstruction* instruction = position.instruction;
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(instruction)) {
if (position.index == operand_and_output_index.second) {
const HloOperandIndex& operand_index = operand_and_output_index.first;
const HloValue& operand_value = dataflow.GetUniqueValueAt(
instruction->operand(operand_index.operand_number),
operand_index.operand_index);
VLOG(3) << " operand value " << operand_value << " aliases.";
aliased_values.insert(&operand_value);
}
}
}
for (const HloUse& use : value.GetUses()) {
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(use.instruction)) {
const HloOperandIndex& operand_index = operand_and_output_index.first;
if (use.operand_number == operand_index.operand_number &&
use.operand_index == operand_index.operand_index) {
const HloValue& use_value = dataflow.GetUniqueValueAt(
use.instruction, operand_and_output_index.second);
VLOG(3) << " use value " << use_value << " aliases.";
aliased_values.insert(&use_value);
}
}
}
}
FlatValueSet ComputeAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow) {
if (VLOG_IS_ON(2)) {
for (const HloUse& use : value.GetUses()) {
VLOG(2) << "Use of value " << value << ": " << use;
}
}
FlatValueSet aliased_values{&value};
ComputeInputOutputAliasedValues(value, dataflow, aliased_values);
ComputeWhileAliasedValues(value, dataflow, aliased_values);
ComputeConditionalAliasedValues(value, dataflow, aliased_values);
ComputeInPlaceOperationAliasedValues(value, dataflow, aliased_values);
return aliased_values;
}
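// Partitions all HloValues into HloBuffers: start with one singleton set per
// value, union-merge the sets of values that ComputeAliasedValues says must
// alias, and turn each surviving non-empty set into a buffer.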
std::vector<HloBuffer> CreateBuffers(const HloDataflowAnalysis& dataflow) {
const std::vector<HloValue*>& values = dataflow.values();
size_t num_buffers = values.size();
std::vector<FlatValueSet> buffer_values(values.size());
absl::flat_hash_map<const HloValue*, FlatValueSet*> value_to_set;
value_to_set.reserve(values.size());
for (size_t i = 0; i < values.size(); ++i) {
buffer_values[i].insert(values[i]);
value_to_set[values[i]] = &buffer_values[i];
}
for (const HloValue* value : values) {
VLOG(3) << "Merging colocated values, value: " << *value;
FlatValueSet aliased_values = ComputeAliasedValues(*value, dataflow);
if (aliased_values.size() < 2) continue;
std::vector<std::pair<FlatValueSet*, HloValue::Id>> aliased_sets;
aliased_sets.reserve(aliased_values.size());
for (const HloValue* aliased : aliased_values) {
aliased_sets.push_back({value_to_set[aliased], aliased->id()});
}
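    // Merge all aliased sets into the largest one (ties broken by smallest
    // value id) so that only the smaller sets have to be copied.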
auto key = [](const auto& set_and_id) {
return std::make_pair(set_and_id.first->size(), -set_and_id.second);
};
FlatValueSet* union_set =
absl::c_max_element(aliased_sets, LessThanByKey(key))->first;
for (auto& aliased_set_and_id : aliased_sets) {
FlatValueSet* aliased_set = aliased_set_and_id.first;
if ((aliased_set != union_set) && !aliased_set->empty()) {
for (const HloValue* aliased_value : *aliased_set) {
CHECK(union_set->insert(aliased_value).second);
value_to_set[aliased_value] = union_set;
}
aliased_set->clear();
--num_buffers;
}
}
}
std::vector<HloBuffer> buffers;
buffers.reserve(num_buffers);
for (const FlatValueSet& value_set : buffer_values) {
if (!value_set.empty()) {
HloBuffer::Id id = buffers.size();
buffers.push_back({id, HloValueSet(value_set).TakeValues()});
}
}
CHECK_EQ(buffers.size(), num_buffers);
return buffers;
}
}
HloAliasAnalysis::HloAliasAnalysis(const HloModule* module) : module_(module) {}
const HloBuffer& HloAliasAnalysis::GetUniqueBufferAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
std::vector<const HloBuffer*> buffers = ComputeBuffersAt(instruction, index);
CHECK_EQ(buffers.size(), 1);
return *buffers[0];
}
HloBuffer& HloAliasAnalysis::GetUniqueBufferAt(
const HloInstruction* instruction, const ShapeIndex& index) {
return GetBuffer(const_cast<const HloAliasAnalysis*>(this)
->GetUniqueBufferAt(instruction, index)
.id());
}
std::vector<const HloBuffer*> HloAliasAnalysis::ComputeBuffersAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
const HloValueSet& value_set =
dataflow_analysis_->GetValueSet(instruction, index);
std::vector<const HloBuffer*> buffers;
buffers.reserve(value_set.values().size());
for (const HloValue* value : value_set.values()) {
buffers.push_back(&GetBufferContainingValue(*value));
}
absl::c_sort(buffers, HloBuffer::IdLessThan);
buffers.erase(std::unique(buffers.begin(), buffers.end()), buffers.end());
return buffers;
}
absl::Status HloAliasAnalysis::Verify() const {
for (const auto& pair : value_to_buffer_) {
const HloValue* value = pair.first;
const HloBuffer& buffer = *pair.second;
TF_RET_CHECK(absl::c_linear_search(buffer.values(), value));
}
for (HloBuffer::Id id = 0; id < buffers_.size(); ++id) {
const HloBuffer& buffer = buffers_[id];
TF_RET_CHECK(buffer.id() == id);
HloValue::Id last_value_id = -1;
for (const HloValue* value : buffer.values()) {
TF_RET_CHECK(GetBufferContainingValue(*value) == buffer);
TF_RET_CHECK(value->id() > last_value_id);
last_value_id = value->id();
}
}
return absl::OkStatus();
}
std::string HloAliasAnalysis::ToString() const {
std::string out =
absl::StrCat("HloAliasAnalysis, module ", module_->name(), "\n");
StrAppend(&out, " Buffers at each position:\n");
for (const HloComputation* computation : module_->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
StrAppend(&out, " ", instruction->name(), ":\n");
if (instruction->shape().IsTuple()) {
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&out, &instruction, this](const Shape&, const ShapeIndex& index) {
StrAppend(&out, " tuple index ", index.ToString(), ":\n");
for (const HloBuffer* buffer :
ComputeBuffersAt(instruction, index)) {
StrAppend(&out, " ", buffer->ToString(), "\n");
}
});
} else {
for (const HloBuffer* buffer :
ComputeBuffersAt(instruction, {})) {
StrAppend(&out, " ", buffer->ToString(), "\n");
}
}
}
}
StrAppend(&out, " Buffers:\n");
for (const HloBuffer& buffer : buffers()) {
StrAppend(&out, " ", buffer.ToString(), "\n");
StrAppend(&out, " positions:\n");
for (const HloPosition& position : buffer.ComputePositions()) {
StrAppend(&out, " ", position.ToString(), "\n");
}
}
return out;
}
absl::StatusOr<std::unique_ptr<HloAliasAnalysis>> HloAliasAnalysis::Run(
const HloModule* module,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
VLOG(2) << "HloAliasAnalysis::Run on module " << module->name();
XLA_VLOG_LINES(2, module->ToString());
auto alias_analysis = absl::WrapUnique(new HloAliasAnalysis(module));
TF_ASSIGN_OR_RETURN(alias_analysis->dataflow_analysis_,
                      HloDataflowAnalysis::Run(*module, /*ssa_form=*/true,
                                               /*bitcast_defines_value=*/false,
                                               can_share_buffer));
size_t num_values = alias_analysis->dataflow_analysis_->values().size();
alias_analysis->buffers_ = CreateBuffers(alias_analysis->dataflow_analysis());
alias_analysis->value_to_buffer_.reserve(num_values);
for (HloBuffer& buffer : alias_analysis->buffers_) {
for (const HloValue* value : buffer.values()) {
alias_analysis->value_to_buffer_[value] = &buffer;
}
}
CHECK_EQ(alias_analysis->value_to_buffer_.size(), num_values);
TF_DCHECK_OK(alias_analysis->Verify());
HloInstruction* root = module->entry_computation()->root_instruction();
ShapeUtil::ForEachSubshape(root->shape(), [&](const Shape& ,
const ShapeIndex& index) {
std::vector<const HloBuffer*> buffers =
alias_analysis->ComputeBuffersAt(root, index);
alias_analysis->live_out_buffers_.insert(buffers.begin(), buffers.end());
});
XLA_VLOG_LINES(2, alias_analysis->ToString());
return std::move(alias_analysis);
}
} | #include "xla/service/hlo_alias_analysis.h"
#include <map>
#include <memory>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
class HloAliasAnalysisTest : public HloTestBase {
protected:
HloAliasAnalysisTest() : HloTestBase() {
module_ = CreateNewVerifiedModule();
}
HloAliasAnalysis& RunAnalysis() {
analysis_ = HloAliasAnalysis::Run(module_.get(),
                                      /*can_share_buffer=*/nullptr)
.value();
return *analysis_;
}
std::vector<HloBuffer> GetBuffersAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
std::set<HloBuffer::Id> buffer_ids;
for (const HloValue* value : analysis_->dataflow_analysis()
.GetValueSet(instruction, index)
.values()) {
buffer_ids.insert(analysis_->GetBufferContainingValue(*value).id());
}
std::vector<HloBuffer> buffers;
buffers.reserve(buffer_ids.size());
for (HloBuffer::Id id : buffer_ids) {
buffers.push_back(analysis_->GetBuffer(id));
}
return buffers;
}
const HloValue& GetValueDefinedAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
return analysis_->dataflow_analysis().GetValueDefinedAt(instruction, index);
}
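  // Returns true if any two distinct values assigned to the same buffer may
  // interfere under a dependency-based ordering, which would indicate an
  // unsound aliasing decision.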
bool AnyValuesInSameBufferInterfere() {
DependencyHloOrdering ordering(module_.get());
for (const HloBuffer& buffer : analysis_->buffers()) {
for (const HloValue* value_a : buffer.values()) {
for (const HloValue* value_b : buffer.values()) {
if (*value_a != *value_b &&
ordering.MayInterfere(*value_a, *value_b,
analysis_->dataflow_analysis())) {
VLOG(1) << *value_a << " interferes with " << *value_b
<< " in buffer: " << buffer;
return true;
}
}
}
}
return false;
}
bool InstructionBuffersAreAmbiguous(const HloInstruction* instruction) const {
for (const auto& pair :
analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
const HloValueSet& value_set = pair.second;
const HloBuffer* buffer = nullptr;
for (const HloValue* value : value_set.values()) {
if (buffer == nullptr) {
buffer = &analysis_->GetBufferContainingValue(*value);
} else if (buffer != &analysis_->GetBufferContainingValue(*value)) {
return true;
}
}
}
return false;
}
bool InstructionBuffersAreDistinct(const HloInstruction* instruction) const {
absl::flat_hash_set<const HloBuffer*> buffers_seen;
for (const auto& pair :
analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
const HloValueSet& value_set = pair.second;
absl::flat_hash_set<const HloBuffer*> buffers_at_this_index;
for (const HloValue* value : value_set.values()) {
buffers_at_this_index.insert(
&analysis_->GetBufferContainingValue(*value));
}
buffers_seen.merge(buffers_at_this_index);
if (!buffers_at_this_index.empty()) return false;
}
return true;
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloAliasAnalysis> analysis_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
};
TEST_F(HloAliasAnalysisTest, BinaryOperation) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, constant1, constant2));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.buffers().size(), 3);
for (const HloInstruction* instruction : {constant1, constant2, add}) {
EXPECT_EQ(analysis.GetUniqueBufferAt(instruction).GetUniqueValue(),
GetValueDefinedAt(instruction));
}
EXPECT_FALSE(InstructionBuffersAreAmbiguous(add));
EXPECT_TRUE(InstructionBuffersAreDistinct(add));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, TupleAndGtes) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
builder.AddInstruction(
HloInstruction::CreateBinary(scalar_shape_, HloOpcode::kAdd, gte0, gte1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.buffers().size(), 4);
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}).GetUniqueValue(),
GetValueDefinedAt(tuple, {}));
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {0}).GetUniqueValue(),
GetValueDefinedAt(param0));
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {1}).GetUniqueValue(),
GetValueDefinedAt(param1));
EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
analysis.GetUniqueBufferAt(gte0));
EXPECT_THAT(
analysis.GetUniqueBufferAt(param0).ComputePositions(),
UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
HloPosition{gte0, {}}));
EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
EXPECT_TRUE(InstructionBuffersAreDistinct(tuple));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, NondistinctTuple) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({param0, param1, param0}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(
analysis.GetUniqueBufferAt(param0).ComputePositions(),
UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
HloPosition{tuple, {2}}));
EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
EXPECT_FALSE(InstructionBuffersAreDistinct(tuple));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, ParametersWithAliasing) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
auto negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte0));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({negate0, negate1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{0}));
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
analysis.GetUniqueBufferAt(tuple, {1}));
}
TEST_F(HloAliasAnalysisTest, ParametersWithCrossAliasing) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{1}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{0}));
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
analysis.GetUniqueBufferAt(tuple, {1}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
analysis.GetUniqueBufferAt(tuple, {1}));
}
TEST_F(HloAliasAnalysisTest, InputOutputAliasingWithWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
auto body_tuple = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, param));
auto while_element_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 0));
auto while_element_2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 1));
auto negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, while_element_1));
auto negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, while_element_2));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({negate_1, negate_2}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
UnorderedElementsAre(&GetValueDefinedAt(param, {1}),
&GetValueDefinedAt(xla_while, {1}),
&GetValueDefinedAt(body_param, {1}),
&GetValueDefinedAt(cond_param, {1}),
&GetValueDefinedAt(add),
&GetValueDefinedAt(negate_2)));
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
UnorderedElementsAre(
HloPosition{param, {1}}, HloPosition{xla_while, {1}},
HloPosition{while_element_2, {}}, HloPosition{body_param, {1}},
HloPosition{body_element_1, {}}, HloPosition{add, {}},
HloPosition{body_tuple, {1}}, HloPosition{tuple, {1}},
HloPosition{cond_param, {1}}, HloPosition{negate_2, {}}));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, SingleCall) {
auto subbuilder = HloComputation::Builder("Subcomputation");
auto subparam0 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto subparam1 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
HloComputation* called_computation =
module_->AddEmbeddedComputation(subbuilder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
UnorderedElementsAre(HloPosition{constant1, {}},
HloPosition{subparam0, {}}));
EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
UnorderedElementsAre(HloPosition{constant2, {}},
HloPosition{subparam1, {}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(add).ComputePositions(),
UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call, {}}));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, ComputationCalledTwice) {
auto subbuilder = HloComputation::Builder("Subcomputation");
auto subparam0 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto subparam1 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
HloComputation* called_computation =
module_->AddEmbeddedComputation(subbuilder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call1 = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
auto call2 = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {call1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
UnorderedElementsAre(HloPosition{constant1, {}},
HloPosition{subparam0, {}}));
EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
UnorderedElementsAre(HloPosition{constant2, {}},
HloPosition{subparam1, {}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(add).ComputePositions(),
UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call1, {}},
HloPosition{subparam0, {}}, HloPosition{call2, {}}));
EXPECT_THAT(GetBuffersAt(subparam0),
UnorderedElementsAre(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(add)));
EXPECT_THAT(GetBuffersAt(subparam1),
UnorderedElementsAre(analysis.GetUniqueBufferAt(constant2)));
EXPECT_TRUE(InstructionBuffersAreAmbiguous(subparam0));
EXPECT_FALSE(InstructionBuffersAreAmbiguous(subparam1));
EXPECT_TRUE(InstructionBuffersAreDistinct(subparam0));
EXPECT_TRUE(InstructionBuffersAreDistinct(subparam1));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, SingleWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
auto body_tuple = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {}).ComputePositions(),
UnorderedElementsAre(HloPosition{tuple, {}}, HloPosition{xla_while, {}},
HloPosition{body_param, {}},
HloPosition{body_tuple, {}},
HloPosition{cond_param, {}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {0}).ComputePositions(),
UnorderedElementsAre(
HloPosition{constant1, {}}, HloPosition{tuple, {0}},
HloPosition{xla_while, {0}}, HloPosition{body_param, {0}},
HloPosition{body_element_0, {}}, HloPosition{body_tuple, {0}},
HloPosition{cond_param, {0}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
UnorderedElementsAre(
HloPosition{constant2, {}}, HloPosition{tuple, {1}},
HloPosition{xla_while, {1}}, HloPosition{body_param, {1}},
HloPosition{body_element_1, {}}, HloPosition{add, {}},
HloPosition{body_tuple, {1}}, HloPosition{cond_param, {1}}));
EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {0}).values(),
UnorderedElementsAre(&GetValueDefinedAt(constant1)));
EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
UnorderedElementsAre(&GetValueDefinedAt(constant2),
&GetValueDefinedAt(xla_while, {1}),
&GetValueDefinedAt(body_param, {1}),
&GetValueDefinedAt(cond_param, {1}),
&GetValueDefinedAt(add)));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, SequentialWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while0 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
auto xla_while1 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while0));
auto xla_while2 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while1));
module_->AddEntryComputation(builder.Build());
FlattenCallGraph flattener;
TF_ASSERT_OK(flattener.Run(module_.get()).status());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}),
analysis.GetUniqueBufferAt(xla_while2, {}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(xla_while2, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
analysis.GetUniqueBufferAt(xla_while2, {1}));
}
TEST_F(HloAliasAnalysisTest, NestedWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto build_cond_computation = [&tuple_shape]() {
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
return cond_builder.Build();
};
HloComputation* condition1 =
module_->AddEmbeddedComputation(build_cond_computation());
HloComputation* condition2 =
module_->AddEmbeddedComputation(build_cond_computation());
auto inner_builder = HloComputation::Builder("inner_body");
auto inner_param = inner_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto inner_element_0 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 0));
auto inner_element_1 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 1));
auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, inner_element_0, inner_element_1));
inner_builder.AddInstruction(
HloInstruction::CreateTuple({inner_element_0, add}));
HloComputation* inner_body =
module_->AddEmbeddedComputation(inner_builder.Build());
auto outer_builder = HloComputation::Builder("outer_body");
auto outer_param = outer_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shap |
1,849 | cpp | tensorflow/tensorflow | xla_debug_info_manager | third_party/xla/xla/service/xla_debug_info_manager.cc | third_party/xla/xla/service/xla_debug_info_manager_test.cc | #ifndef XLA_SERVICE_XLA_DEBUG_INFO_MANAGER_H_
#define XLA_SERVICE_XLA_DEBUG_INFO_MANAGER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "tsl/platform/status.h"
namespace xla {
using ModuleIdentifier = int;
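// Process-wide singleton that tracks registered HLO modules (plus their
// buffer assignments) so profiling tools can retrieve an HloProto for every
// module that was alive at any point while tracing was active.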
class XlaDebugInfoManager {
public:
static XlaDebugInfoManager* Get() {
static XlaDebugInfoManager* singleton = new XlaDebugInfoManager();
return singleton;
}
void RegisterModule(std::shared_ptr<const HloModule> hlo_module,
BufferAssignmentProto buffer_assignment);
void UnregisterModule(ModuleIdentifier module_id);
void StartTracing();
void StopTracing(
std::vector<std::unique_ptr<HloProto>>* module_debug_info = nullptr);
bool TracksModule(ModuleIdentifier module_id) const;
friend class XlaDebugInfoManagerTestPeer;
private:
XlaDebugInfoManager() = default;
struct XlaModuleEntry {
std::shared_ptr<const HloModule> hlo_module;
BufferAssignmentProto buffer_assignment;
bool active = false;
};
mutable absl::Mutex mutex_;
bool tracing_active_ ABSL_GUARDED_BY(mutex_) = false;
absl::flat_hash_map<ModuleIdentifier, XlaModuleEntry> modules_
ABSL_GUARDED_BY(mutex_);
};
}
#endif
#include "xla/service/xla_debug_info_manager.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_proto_util.h"
namespace xla {
void XlaDebugInfoManager::RegisterModule(
std::shared_ptr<const HloModule> hlo_module,
BufferAssignmentProto buffer_assignment) {
CHECK(hlo_module != nullptr);
absl::MutexLock lock(&mutex_);
auto result = modules_.try_emplace(hlo_module->unique_id());
CHECK(result.second);
XlaModuleEntry& m = result.first->second;
m.hlo_module = std::move(hlo_module);
m.buffer_assignment = std::move(buffer_assignment);
m.active = true;
}
void XlaDebugInfoManager::UnregisterModule(ModuleIdentifier module_id) {
absl::MutexLock lock(&mutex_);
auto it = modules_.find(module_id);
CHECK(it != modules_.end());
if (!tracing_active_) {
modules_.erase(it);
} else {
XlaModuleEntry& m = it->second;
m.active = false;
}
}
void XlaDebugInfoManager::StartTracing() {
absl::MutexLock lock(&mutex_);
tracing_active_ = true;
}
void XlaDebugInfoManager::StopTracing(
std::vector<std::unique_ptr<HloProto>>* module_debug_info) {
std::vector<XlaModuleEntry> modules_to_serialize;
{
absl::MutexLock lock(&mutex_);
if (!tracing_active_) return;
tracing_active_ = false;
modules_to_serialize.reserve(modules_.size());
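    // Walk the map once: modules already unregistered (inactive) are moved out
    // and erased, while active ones are copied, so serialization below can
    // happen without holding the lock.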
for (auto it = modules_.begin(); it != modules_.end();) {
auto& m = it->second;
auto cur_it = it++;
if (!m.active) {
modules_to_serialize.emplace_back(std::move(m));
modules_.erase(cur_it);
} else {
modules_to_serialize.emplace_back(m);
}
}
}
if (module_debug_info) {
module_debug_info->clear();
for (const auto& m : modules_to_serialize) {
auto hlo_proto = std::make_unique<HloProto>(MakeHloProto(*m.hlo_module));
*hlo_proto->mutable_buffer_assignment() = m.buffer_assignment;
module_debug_info->emplace_back(std::move(hlo_proto));
}
}
}
bool XlaDebugInfoManager::TracksModule(ModuleIdentifier module_id) const {
absl::MutexLock lock(&mutex_);
return modules_.find(module_id) != modules_.end();
}
} | #include "xla/service/xla_debug_info_manager.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
class XlaDebugInfoManagerTestPeer {
public:
void RegisterModule(std::shared_ptr<const HloModule> hlo_module,
BufferAssignmentProto buffer_assignment) {
return xla_debug_info_manager_.RegisterModule(hlo_module,
std::move(buffer_assignment));
}
void UnregisterModule(ModuleIdentifier module_id) {
return xla_debug_info_manager_.UnregisterModule(module_id);
}
void StartTracing() { return xla_debug_info_manager_.StartTracing(); }
absl::flat_hash_set<ModuleIdentifier> StopTracing() {
std::vector<std::unique_ptr<HloProto>> module_debug_info;
xla_debug_info_manager_.StopTracing(&module_debug_info);
absl::flat_hash_set<ModuleIdentifier> module_ids;
for (const auto& hlo_proto : module_debug_info) {
module_ids.insert(hlo_proto->hlo_module().id());
}
return module_ids;
}
absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
absl::flat_hash_set<ModuleIdentifier> module_ids;
absl::MutexLock lock(&xla_debug_info_manager_.mutex_);
for (const auto& it : xla_debug_info_manager_.modules_) {
module_ids.insert(it.first);
}
return module_ids;
}
private:
XlaDebugInfoManager xla_debug_info_manager_;
};
namespace {
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
class XlaDebugInfoManagerTest : public HloTestBase {
protected:
struct DebugMetadata {
ModuleIdentifier unique_id;
std::shared_ptr<HloModule> module;
};
ModuleIdentifier RegisterProgram(const std::string& module_name) {
DebugMetadata debug_info;
HloModuleConfig config;
debug_info.module = std::make_shared<HloModule>(module_name, config);
ModuleIdentifier unique_id = debug_info.module->unique_id();
debug_info.unique_id = unique_id;
xla_debug_info_manager_.RegisterModule(debug_info.module,
BufferAssignmentProto());
external_references_.push_back(std::move(debug_info));
return unique_id;
}
void UnregisterProgram(ModuleIdentifier unique_id) {
for (int i = 0; i < external_references_.size(); i++) {
if (external_references_[i].unique_id == unique_id) {
xla_debug_info_manager_.UnregisterModule(unique_id);
external_references_.erase(external_references_.begin() + i);
break;
}
}
}
absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
return xla_debug_info_manager_.GetModuleIds();
}
void StartTrace() { xla_debug_info_manager_.StartTracing(); }
absl::flat_hash_set<ModuleIdentifier> StopTrace() {
return xla_debug_info_manager_.StopTracing();
}
std::vector<DebugMetadata> external_references_;
XlaDebugInfoManagerTestPeer xla_debug_info_manager_;
};
TEST_F(XlaDebugInfoManagerTest, NoTraceBasic) {
auto program0 = RegisterProgram("program0");
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0));
auto program1 = RegisterProgram("program1");
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0, program1));
UnregisterProgram(program0);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
UnregisterProgram(program1);
EXPECT_TRUE(GetModuleIds().empty());
}
TEST_F(XlaDebugInfoManagerTest, NoTraceDuplicateIds) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
EXPECT_THAT(GetModuleIds(),
UnorderedElementsAre(program0A, program0B, program1));
UnregisterProgram(program1);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A, program0B));
UnregisterProgram(program0A);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B));
UnregisterProgram(program0B);
EXPECT_THAT(GetModuleIds(), IsEmpty());
}
TEST_F(XlaDebugInfoManagerTest, ActiveTrace) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
StartTrace();
auto program2 = RegisterProgram("program2");
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1, program2));
StartTrace();
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1, program2));
UnregisterProgram(program2);
EXPECT_THAT(GetModuleIds(),
UnorderedElementsAre(program0A, program0B, program1));
UnregisterProgram(program0A);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B, program1));
UnregisterProgram(program0B);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
UnregisterProgram(program1);
EXPECT_THAT(GetModuleIds(), IsEmpty());
}
TEST_F(XlaDebugInfoManagerTest, UnregisterDuringTrace) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
StartTrace();
UnregisterProgram(program1);
UnregisterProgram(program0B);
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1));
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A));
UnregisterProgram(program0A);
}
}
} |
1,850 | cpp | tensorflow/tensorflow | bfloat16_propagation | third_party/xla/xla/service/bfloat16_propagation.cc | third_party/xla/xla/service/bfloat16_propagation_test.cc | #ifndef XLA_SERVICE_BFLOAT16_PROPAGATION_H_
#define XLA_SERVICE_BFLOAT16_PROPAGATION_H_
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
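// HLO pass that propagates BF16 precision backwards from uses that tolerate
// it: an F32 output is demoted to BF16 only when every user can consume BF16,
// and aliasing constraints (fusions, while loops, conditionals, tuples) are
// then resolved so producer and consumer shapes stay consistent.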
class BFloat16Propagation : public HloModulePass {
public:
explicit BFloat16Propagation(const FloatSupport* bfloat16_support);
~BFloat16Propagation() override = default;
absl::string_view name() const override { return "bfloat16-propagation"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
virtual bool ShouldKeepPrecisionUnchanged(const HloInstruction* inst);
virtual bool InstructionIsCandidateForBF16Output(HloInstruction* hlo);
private:
absl::flat_hash_set<const HloInstruction*> consider_using_bfloat16_;
void DetermineInstructionPrecision(HloInstruction* hlo, bool skip_parameters);
void DetermineFusionComputationPrecision(HloInstruction* fusion);
void RevertIfFusionInternalBF16Changes(HloInstruction* fusion);
void DetermineWhileComputationsPrecision(HloInstruction* while_hlo);
void DetermineConditionalComputationsPrecision(HloInstruction* cond);
absl::flat_hash_set<const HloInstruction*>
instructions_visited_in_backward_pass_;
absl::flat_hash_set<const HloComputation*>
computations_visited_in_backward_pass_;
void ResolveInconsistencyOfAliasingBuffers(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
bool ResolveInconsistencyOfAliasingBuffersHelper(
HloComputation* computation,
absl::flat_hash_set<const HloComputation*>* visited_computations);
void AdjustCalledComputationParameters(HloInstruction* hlo);
void AdjustCalledComputationRoot(HloInstruction* hlo);
absl::Status ResolveInconsistentFusions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
absl::Status ResolveConvertedConstants(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
absl::Status SkipNoopConversions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
bool AllUsersConsumeBF16(const HloInstruction& hlo,
const ShapeIndex& index) const;
PrimitiveType OutputTypeAfterChange(HloInstruction* hlo,
const ShapeIndex& index) const;
PrimitiveType ValueTypeAfterChange(const HloValue* value) const;
void AddToOrRemoveFromBF16ChangeSet(HloInstruction* hlo,
const ShapeIndex& index,
PrimitiveType target_type);
absl::flat_hash_set<const HloValue*> values_that_must_be_kept_as_f32_;
absl::flat_hash_map<const HloComputation*, int64_t> caller_counts_;
absl::flat_hash_map<HloInstruction*, absl::flat_hash_map<Shape*, ShapeIndex>>
changes_to_bf16_;
bool changed_ = false;
const FloatSupport* bfloat16_support_;
std::unique_ptr<HloDataflowAnalysis> dataflow_;
};
}
#endif
#include "xla/service/bfloat16_propagation.h"
#include "absl/algorithm/container.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
BFloat16Propagation::BFloat16Propagation(const FloatSupport* bfloat16_support)
: bfloat16_support_(bfloat16_support) {
DCHECK_EQ(bfloat16_support->LowPrecisionType(), BF16);
}
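// If `fusion`'s output was demoted to BF16 at some shape index, demote the
// fused computation's root there as well, then run the backward pass over the
// fused instructions (and finally revert changes that stay purely internal to
// the fusion).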
void BFloat16Propagation::DetermineFusionComputationPrecision(
HloInstruction* fusion) {
CHECK_EQ(fusion->opcode(), HloOpcode::kFusion);
if (!bfloat16_support_->SupportsMixedPrecisions(*fusion)) {
return;
}
auto root = fusion->fused_instructions_computation()->root_instruction();
ShapeUtil::ForEachSubshape(
root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(fusion, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(root, index, BF16);
VLOG(2) << "Fused root " << root->ToString() << " at shape index "
<< index << " changed to BF16 precision for fusion "
<< fusion->ToString();
}
});
auto insts =
fusion->fused_instructions_computation()->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(
fusion->fused_instructions_computation());
RevertIfFusionInternalBF16Changes(fusion);
}
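// Undoes BF16 changes that are internal to the fusion: an instruction whose
// operands were not changed and whose value does not alias a changed root
// buffer gains nothing from a BF16 output, so its pending change is erased.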
void BFloat16Propagation::RevertIfFusionInternalBF16Changes(
HloInstruction* fusion) {
auto has_changes = [this](HloInstruction* inst) {
auto it = changes_to_bf16_.find(inst);
return it != changes_to_bf16_.end() && !it->second.empty();
};
auto root = fusion->fused_instructions_computation()->root_instruction();
absl::flat_hash_set<const HloValue*> changed_root_buffers;
auto root_changes_it = changes_to_bf16_.find(root);
if (root_changes_it != changes_to_bf16_.end()) {
for (const auto& entry : root_changes_it->second) {
for (const HloValue* value :
dataflow_->GetValueSet(root, entry.second).values()) {
changed_root_buffers.insert(value);
}
}
}
auto aliases_changed_root_buffer = [this, &changed_root_buffers](
const HloInstruction* inst) {
bool aliasing = false;
ShapeUtil::ForEachSubshape(inst->shape(), [&](const Shape& subshape,
const ShapeIndex& index) {
if (aliasing) {
return;
}
if (subshape.element_type() != F32) {
return;
}
aliasing = absl::c_any_of(dataflow_->GetValueSet(inst, index).values(),
IsValueIn(changed_root_buffers));
});
return aliasing;
};
for (auto inst :
fusion->fused_instructions_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kParameter) {
continue;
}
if (aliases_changed_root_buffer(inst)) {
continue;
}
if (inst->opcode() == HloOpcode::kFusion) {
bool parameter_reverted = false;
for (int64_t i = 0; i < inst->operand_count(); ++i) {
if (has_changes(inst->mutable_operand(i))) {
continue;
}
auto* fused_parameter = inst->fused_parameter(i);
if (has_changes(fused_parameter)) {
changes_to_bf16_.erase(fused_parameter);
parameter_reverted = true;
}
}
if (parameter_reverted) {
RevertIfFusionInternalBF16Changes(inst);
}
}
if (!has_changes(inst)) {
continue;
}
bool revert_changes = true;
for (auto operand : inst->operands()) {
if (has_changes(operand)) {
revert_changes = false;
break;
}
}
if (revert_changes) {
changes_to_bf16_.erase(inst);
}
}
}
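// Mirrors a while instruction's BF16 output changes onto its body root, then
// runs the backward pass over the body and condition computations.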
void BFloat16Propagation::DetermineWhileComputationsPrecision(
HloInstruction* while_hlo) {
CHECK_EQ(while_hlo->opcode(), HloOpcode::kWhile);
HloComputation* body = while_hlo->while_body();
auto body_root = body->root_instruction();
HloComputation* condition = while_hlo->while_condition();
ShapeUtil::ForEachSubshape(
body_root->shape(), [this, while_hlo, body_root](
const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(while_hlo, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(body_root, index, BF16);
VLOG(2) << "While body root " << body_root->ToString()
<< " at shape index " << index
<< " changed to BF16 precision for while "
<< while_hlo->ToString();
}
});
auto body_insts = body->MakeInstructionPostOrder();
for (auto inst_it = body_insts.rbegin(); inst_it != body_insts.rend();
++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(body);
auto condition_insts = condition->MakeInstructionPostOrder();
for (auto inst_it = condition_insts.rbegin();
inst_it != condition_insts.rend(); ++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(condition);
}
void BFloat16Propagation::DetermineConditionalComputationsPrecision(
HloInstruction* cond) {
CHECK_EQ(cond->opcode(), HloOpcode::kConditional);
for (int64_t i = 0; i < cond->branch_count(); ++i) {
auto branch = cond->branch_computation(i);
auto root = branch->root_instruction();
ShapeUtil::ForEachSubshape(
root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(cond, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(root, index, BF16);
VLOG(2) << "Conditional branch " << i << " root "
<< root->ToString() << " at shape index " << index
<< " changed to BF16 precision for conditional "
<< cond->ToString();
}
});
auto insts = branch->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(branch);
}
}
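// Returns true if every use of `hlo` at `index` (considering only
// instructions already visited in the backward pass) can accept BF16, so the
// value may be demoted without leaving an F32-only consumer behind.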
bool BFloat16Propagation::AllUsersConsumeBF16(const HloInstruction& hlo,
const ShapeIndex& index) const {
const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);
if (subshape.element_type() != BF16 && subshape.element_type() != F32) {
return false;
}
auto& value_set = dataflow_->GetValueSet(&hlo, index);
for (const HloValue* value : value_set.values()) {
if (ContainsKey(values_that_must_be_kept_as_f32_, value)) {
return false;
}
if (value->shape().element_type() == BF16) {
continue;
}
for (const HloUse& use : value->GetUses()) {
if (!ContainsKey(instructions_visited_in_backward_pass_,
use.instruction)) {
continue;
}
if (use.instruction->HasSideEffectNoRecurse()) {
return false;
}
if (use.instruction->opcode() == HloOpcode::kFusion) {
auto* fused_parameter =
use.instruction->fused_parameter(use.operand_number);
if (OutputTypeAfterChange(fused_parameter, use.operand_index) != BF16) {
return false;
}
continue;
} else if (use.instruction->opcode() == HloOpcode::kWhile) {
auto* cond_parameter =
use.instruction->while_condition()->parameter_instruction(
use.operand_number);
if (OutputTypeAfterChange(cond_parameter, use.operand_index) != BF16) {
return false;
}
auto* body_parameter =
use.instruction->while_body()->parameter_instruction(
use.operand_number);
if (OutputTypeAfterChange(body_parameter, use.operand_index) != BF16) {
return false;
}
continue;
} else if (use.instruction->opcode() == HloOpcode::kConditional) {
auto* cond_parameter =
use.instruction->branch_computation(use.operand_number - 1)
->parameter_instruction(0);
if (OutputTypeAfterChange(cond_parameter, use.operand_index) != BF16) {
return false;
}
continue;
}
if (bfloat16_support_->EffectiveOperandPrecisionIsLowPrecision(
*use.instruction, use.operand_number)) {
continue;
}
if (bfloat16_support_->EffectiveOperandPrecisionIsOutputPrecision(
*use.instruction, use.operand_number)) {
if (use.instruction->opcode() == HloOpcode::kTuple ||
(use.instruction->opcode() == HloOpcode::kAllReduce &&
use.instruction->shape().IsTuple())) {
ShapeIndex use_output_index{use.operand_number};
for (int64_t i : use.operand_index) {
use_output_index.push_back(i);
}
if (OutputTypeAfterChange(use.instruction, use_output_index) ==
BF16) {
continue;
}
} else if (use.instruction->opcode() == HloOpcode::kGetTupleElement) {
ShapeIndex use_output_index;
for (int64_t i = 1; i < use.operand_index.size(); ++i) {
use_output_index.push_back(use.operand_index[i]);
}
if (OutputTypeAfterChange(use.instruction, use_output_index) ==
BF16) {
continue;
}
} else {
if (OutputTypeAfterChange(use.instruction, use.operand_index) ==
BF16) {
continue;
}
}
}
return false;
}
}
return true;
}
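// Precision must stay unchanged for custom calls, calls, bitcast-converts and
// side-effecting instructions, as well as for custom fusions whose root falls
// into one of those categories.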
bool BFloat16Propagation::ShouldKeepPrecisionUnchanged(
const HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kFusion &&
inst->fusion_kind() == HloInstruction::FusionKind::kCustom) {
return ShouldKeepPrecisionUnchanged(
inst->fused_instructions_computation()->root_instruction());
}
return inst->opcode() == HloOpcode::kCustomCall ||
inst->opcode() == HloOpcode::kCall ||
inst->opcode() == HloOpcode::kBitcastConvert ||
inst->HasSideEffectNoRecurse();
}
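// Backward-pass handling of a single instruction: decides whether its F32
// outputs can be demoted to BF16. The absl::Cleanup ensures that on every
// return path the instruction is marked visited and the called computations of
// a fusion/while/conditional are processed, unless processing is postponed
// because a called computation has more than one caller. Roots of non-fusion
// computations are pinned to F32 rather than demoted.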
void BFloat16Propagation::DetermineInstructionPrecision(HloInstruction* hlo,
bool skip_parameters) {
bool postpone_processing_called_computations = false;
absl::Cleanup cleaner = [this, hlo,
&postpone_processing_called_computations] {
if (!postpone_processing_called_computations) {
if (hlo->opcode() == HloOpcode::kFusion) {
DetermineFusionComputationPrecision(hlo);
} else if (hlo->opcode() == HloOpcode::kWhile) {
DetermineWhileComputationsPrecision(hlo);
} else if (hlo->opcode() == HloOpcode::kConditional) {
DetermineConditionalComputationsPrecision(hlo);
}
}
instructions_visited_in_backward_pass_.insert(hlo);
};
if (hlo->opcode() == HloOpcode::kWhile &&
(caller_counts_[hlo->while_condition()] > 1 ||
caller_counts_[hlo->while_body()] > 1)) {
postpone_processing_called_computations = true;
return;
}
if (hlo->opcode() == HloOpcode::kConditional &&
absl::c_any_of(hlo->branch_computations(), [&](const HloComputation* c) {
return caller_counts_[c] > 1;
})) {
postpone_processing_called_computations = true;
return;
}
CHECK(hlo->parent() != nullptr);
if (hlo == hlo->parent()->root_instruction()) {
if (!hlo->parent()->IsFusionComputation()) {
      ShapeUtil::ForEachSubshape(hlo->shape(), [&](const Shape& /*subshape*/,
const ShapeIndex& index) {
if (OutputTypeAfterChange(hlo, index) != F32) {
return;
}
for (const auto* value : dataflow_->GetValueSet(hlo, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
});
}
return;
}
if (ShouldKeepPrecisionUnchanged(hlo) ||
(hlo->opcode() == HloOpcode::kParameter && skip_parameters)) {
return;
}
if (!ContainsKey(consider_using_bfloat16_, hlo)) {
return;
}
if (!bfloat16_support_->SupportsLowPrecisionOutput(*hlo)) {
return;
}
ShapeUtil::ForEachSubshape(
hlo->shape(),
      [hlo, this](const Shape& /*subshape*/, const ShapeIndex& index) {
if (OutputTypeAfterChange(hlo, index) == F32 &&
AllUsersConsumeBF16(*hlo, index)) {
AddToOrRemoveFromBF16ChangeSet(hlo, index, BF16);
VLOG(2) << "HloInstruction output at shape index " << index
<< " changed to BF16 precision: " << hlo->ToString();
}
});
}
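// An instruction that lacks mixed-precision support (and is not a tuple, GTE,
// domain, or already BF16) can only be a BF16 output candidate if every
// operand passes its precision through to the output and is itself in the
// consider_using_bfloat16_ set.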
bool BFloat16Propagation::InstructionIsCandidateForBF16Output(
HloInstruction* hlo) {
if (!bfloat16_support_->SupportsMixedPrecisions(*hlo) &&
hlo->opcode() != HloOpcode::kTuple &&
hlo->opcode() != HloOpcode::kGetTupleElement &&
hlo->opcode() != HloOpcode::kDomain &&
hlo->shape().element_type() != BF16) {
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
if (!bfloat16_support_->EffectiveOperandPrecisionIsOutputPrecision(*hlo,
i) ||
!ContainsKey(consider_using_bfloat16_, hlo->operand(i))) {
return false;
}
}
}
return true;
}
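// Keeps called computations consistent with their callers: each leaf of every
// parameter of the fusion computation, while condition/body, or conditional
// branches is adjusted to the precision of the corresponding operand of `hlo`.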
void BFloat16Propagation::AdjustCalledComputationParameters(
HloInstruction* hlo) {
auto adjust_computation = [this, hlo](
HloComputation* computation,
absl::Span<HloInstruction* const> operands) {
CHECK_EQ(operands.size(), computation->num_parameters());
for (int64_t i = 0; i < operands.size(); ++i) {
auto parameter = computation->parameter_instruction(i);
ShapeUtil::ForEachSubshape(
parameter->shape(),
          [this, i, hlo, &operands, parameter](const Shape& /*subshape*/,
const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(parameter->shape(), index)) {
return;
}
PrimitiveType operand_type =
OutputTypeAfterChange(operands[i], index);
if (OutputTypeAfterChange(parameter, index) == operand_type) {
return;
}
AddToOrRemoveFromBF16ChangeSet(parameter, index, operand_type);
VLOG(2) << "Called computation parameter " << parameter->ToString()
<< " at shape index " << index << " adjusted to "
<< (operand_type == BF16 ? "BF16" : "F32")
<< " to match operand in HLO " << hlo->ToString();
});
}
};
switch (hlo->opcode()) {
case HloOpcode::kFusion:
adjust_computation(hlo->fused_instructions_computation(),
hlo->operands());
break;
case HloOpcode::kWhile:
adjust_computation(hlo->while_condition(), hlo->operands());
adjust_computation(hlo->while_body(), hlo->operands());
break;
case HloOpcode::kConditional:
for (int64_t i = 0; i < hlo->branch_count(); ++i) {
adjust_computation(hlo->branch_computation(i),
{hlo->mutable_operand(i + 1)});
}
break;
default:
break;
}
}
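// Mirrors AdjustCalledComputationParameters for outputs: the root of the
// fusion computation, while body, or conditional branches is adjusted to match
// `hlo`'s output precision, and any leaf forced back to F32 is pinned so later
// steps cannot demote it again.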
void BFloat16Propagation::AdjustCalledComputationRoot(HloInstruction* hlo) {
auto adjust_computation = [this, hlo](HloComputation* computation,
HloInstruction* output) {
HloInstruction* root = computation->root_instruction();
ShapeUtil::ForEachSubshape(root->shape(), [this, hlo, root, output](
                                                  const Shape& /*subshape*/,
const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(hlo->shape(), index)) {
return;
}
const PrimitiveType output_type = OutputTypeAfterChange(output, index);
if (OutputTypeAfterChange(root, index) == output_type) {
return;
}
AddToOrRemoveFromBF16ChangeSet(root, index, output_type);
if (output_type == F32) {
for (const auto* value : dataflow_->GetValueSet(root, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
}
VLOG(2) << "Called computation root " << root->ToString()
<< " at shape index " << index << " adjusted to "
<< (output_type == BF16 ? "BF16" : "F32")
<< " to match output shape of " << hlo->ToString();
});
};
switch (hlo->opcode()) {
case HloOpcode::kFusion:
adjust_computation(hlo->fused_instructions_computation(), hlo);
break;
case HloOpcode::kWhile:
adjust_computation(hlo->while_body(), hlo);
break;
case HloOpcode::kConditional:
for (auto* branch : hlo->branch_computations()) {
adjust_computation(branch, hlo);
}
break;
default:
break;
}
}
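// Iterates over `computation` in reverse post-order until no more changes are
// made, resolving precision mismatches between buffers that alias each other;
// returns whether any parameter of the computation changed precision.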
bool BFloat16Propagation::ResolveInconsistencyOfAliasingBuffersHelper(
HloComputation* computation,
absl::flat_hash_set<const HloComputation*>* visited_computations) {
bool parameter_changed = false;
auto insts = computation->MakeInstructionPostOrder();
while (true) {
bool any_change = false;
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
auto hlo = *inst_it;
      auto adjust_hlo_output = [&](const Shape& /*subshape*/,
const ShapeIn | #include "xla/service/bfloat16_propagation.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
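// FloatSupport stub used by the tests below: every instruction may consume and
// produce BF16 and mixed precision is allowed everywhere; only kDot treats its
// operands as effectively low precision.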
class TestBFloat16Support : public FloatSupport {
public:
TestBFloat16Support() : FloatSupport(BF16) {}
~TestBFloat16Support() override {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return true;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return true;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return true;
}
bool EffectiveOperandPrecisionIsLowPrecision(
const HloInstruction& hlo, int64_t operand_index) const override {
return hlo.opcode() == HloOpcode::kDot;
}
};
class BFloat16PropagationTest : public HloTestBase {
protected:
BFloat16PropagationTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/true) {}
bool PropagatePrecision(HloModule* module) {
TestBFloat16Support bfloat16_support;
BFloat16Propagation propagation(&bfloat16_support);
absl::StatusOr<bool> result = propagation.Run(module);
EXPECT_IS_OK(result.status());
return result.value();
}
bool OutputsBF16(const HloInstruction* inst) {
if (inst->shape().element_type() == BF16) {
return true;
}
return inst->user_count() == 1 &&
inst->users()[0]->opcode() == HloOpcode::kConvert &&
inst->users()[0]->shape().element_type() == BF16;
}
std::unique_ptr<HloInstruction> CreateDot(const Shape& shape,
HloInstruction* lhs,
HloInstruction* rhs) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
DefaultPrecisionConfig(2));
}
};
TEST_F(BFloat16PropagationTest, PropagateThroughSelectButNotAdd) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* c =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, b));
HloInstruction* pred = builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {2, 4}), a, b, ComparisonDirection::kEq));
HloInstruction* sel = builder.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kSelect, pred, c, add1));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), sel, {1, 0}));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), xpose, a));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {4, 4}), HloOpcode::kAdd, dot, dot));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(sel));
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_FALSE(OutputsBF16(add0));
EXPECT_FALSE(OutputsBF16(a));
EXPECT_FALSE(OutputsBF16(b));
EXPECT_FALSE(OutputsBF16(c));
}
TEST_F(BFloat16PropagationTest, PropagateThroughMaxPoolReduceWindow) {
auto module = CreateNewVerifiedModule();
auto sub_builder = HloComputation::Builder("max");
HloInstruction* p0 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "a"));
HloInstruction* p1 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "b"));
sub_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMaximum, p0, p1));
auto max_computation = module->AddEmbeddedComputation(sub_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* c =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "c"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
HloInstruction* rw =
builder.AddInstruction(HloInstruction::CreateReduceWindow(
shape, add,
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32))),
window, max_computation));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), c, {1, 0}));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), xpose, rw));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {4, 4}), HloOpcode::kAdd, dot, dot));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(rw));
}
TEST_F(BFloat16PropagationTest, DoNotChangeAllReduce) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
auto rb = HloComputation::Builder(TestName());
rb.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd,
rb.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0")),
rb.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"))));
auto reduction = module->AddEmbeddedComputation(rb.Build());
HloInstruction* all_reduce =
builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({shape, shape}), {a, b}, reduction,
          CollectiveDeviceList(), /*constrain_layout=*/false,
          /*channel_id=*/1, /*use_global_device_ids=*/false));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, all_reduce, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, all_reduce, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, gte0, gte1));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
}
TEST_F(BFloat16PropagationTest, ConvertConstantLiteral) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
Array2D<float> array_a(4, 4);
array_a.FillUnique(1.0f);
Array2D<float> array_b(4, 4);
array_b.FillUnique(10.0f);
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_a)));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_b)));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, a, b));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(dot->operand(0)));
EXPECT_TRUE(OutputsBF16(dot->operand(1)));
EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kConstant);
EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kConstant);
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::ConvertF32ToBF16(LiteralUtil::CreateFromArray(array_a)),
dot->operand(0)->literal()));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::ConvertF32ToBF16(LiteralUtil::CreateFromArray(array_b)),
dot->operand(1)->literal()));
}
TEST_F(BFloat16PropagationTest, PropagateThroughTuples) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, a));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, b, b));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), add1, {1, 0}));
HloInstruction* tuple0 =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1, add2}));
HloInstruction* tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({tuple0, xpose}));
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(xpose->shape(), tuple1, 1));
HloInstruction* rhs =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
add0->shape(),
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
tuple0->shape(), tuple1, 0)),
0));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), lhs, rhs));
HloInstruction* output_tuple =
builder.AddInstruction(HloInstruction::CreateTuple({dot, add2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), output_tuple);
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(add0));
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_FALSE(OutputsBF16(add2));
}
TEST_F(BFloat16PropagationTest, SameValueReferencedTwice) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, a));
HloInstruction* lhs = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), add1, {1, 0}));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(add1->shape(), tuple, 1));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), lhs, rhs));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_TRUE(OutputsBF16(lhs));
}
TEST_F(BFloat16PropagationTest, DoNotChangeComputationRoot) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, add, add));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add, dot}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), tuple);
EXPECT_FALSE(OutputsBF16(add));
}
TEST_F(BFloat16PropagationTest, PropagateThroughFusion) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f0 = HloComputation::Builder("fusion0");
HloInstruction* a_f0 =
builder_f0.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f0 =
builder_f0.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* tuple_f0 =
builder_f0.AddInstruction(HloInstruction::CreateTuple({a_f0, b_f0}));
auto comp_f0 = module->AddEmbeddedComputation(builder_f0.Build());
auto fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_f0->shape(), HloInstruction::FusionKind::kCustom, {add, add},
comp_f0));
auto builder_f1 = HloComputation::Builder("fusion1");
HloInstruction* p_f1 = builder_f1.AddInstruction(
HloInstruction::CreateParameter(0, tuple_f0->shape(), "param"));
HloInstruction* a_f1 = builder_f1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p_f1, 0));
HloInstruction* b_f1 = builder_f1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p_f1, 1));
HloInstruction* dot = builder_f1.AddInstruction(CreateDot(shape, a_f1, b_f1));
auto comp_f1 = module->AddEmbeddedComputation(builder_f1.Build());
auto fusion1 = builder.AddInstruction(HloInstruction::CreateFusion(
dot->shape(), HloInstruction::FusionKind::kCustom, {fusion0}, comp_f1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), fusion1);
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(a_f0));
EXPECT_TRUE(OutputsBF16(b_f0));
EXPECT_TRUE(OutputsBF16(a_f1));
EXPECT_TRUE(OutputsBF16(b_f1));
}
TEST_F(BFloat16PropagationTest, FusionWithBitcastConvertRoot) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape u32_shape = ShapeUtil::MakeShape(U32, {4, 4});
Shape f32_shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, u32_shape, "param"));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f = builder_f.AddInstruction(
HloInstruction::CreateParameter(0, u32_shape, "a"));
HloInstruction* bc_f = builder_f.AddInstruction(
HloInstruction::CreateBitcastConvert(f32_shape, a_f));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
f32_shape, HloInstruction::FusionKind::kLoop, {param}, comp_f));
auto dot = builder.AddInstruction(CreateDot(f32_shape, fusion, fusion));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_EQ(bc_f->shape(), f32_shape);
EXPECT_TRUE(OutputsBF16(bc_f));
}
TEST_F(BFloat16PropagationTest, DiscardFusionInternalBF16Changes) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add_f = builder_f.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a_f, b_f));
HloInstruction* dot_f =
builder_f.AddInstruction(CreateDot(shape, add_f, add_f));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
dot_f->shape(), HloInstruction::FusionKind::kCustom, {add, add}, comp_f));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), fusion);
}
TEST_F(BFloat16PropagationTest, ConvertTupleFusionElementIfUsedByAdd) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f = HloComputation::Builder("fusion0");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add_f = builder_f.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a_f, b_f));
HloInstruction* tuple_f =
builder_f.AddInstruction(HloInstruction::CreateTuple({a_f, add_f}));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_f->shape(), HloInstruction::FusionKind::kCustom, {add, add},
comp_f));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, gte0, gte1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(gte0));
EXPECT_TRUE(OutputsBF16(gte1));
EXPECT_FALSE(OutputsBF16(a_f));
EXPECT_FALSE(OutputsBF16(b_f));
EXPECT_TRUE(OutputsBF16(add_f));
auto new_fusion_root = comp_f->root_instruction();
EXPECT_EQ(new_fusion_root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(new_fusion_root->operand(1), add_f);
EXPECT_EQ(new_fusion_root->operand(0)->opcode(), HloOpcode::kConvert);
EXPECT_TRUE(OutputsBF16(new_fusion_root->operand(0)));
}
TEST_F(BFloat16PropagationTest, PropagateThroughSimpleWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
auto cond_dot =
builder_cond.AddInstruction(CreateDot(shape, cond_param, cond_param));
auto cond_root = builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, shape, "body_param"));
auto body_dot =
builder_body.AddInstruction(CreateDot(shape, body_param, body_param));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(shape, cond, body, add));
auto dot = builder.AddInstruction(CreateDot(shape, while_hlo, while_hlo));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(
ShapeUtil::Equal(cond_root->shape(), ShapeUtil::MakeShape(PRED, {})));
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(body_dot));
EXPECT_TRUE(OutputsBF16(body_param));
EXPECT_TRUE(OutputsBF16(cond_param));
EXPECT_FALSE(OutputsBF16(dot));
}
TEST_F(BFloat16PropagationTest,
ConditionPreventsPropagationForFusionInsideWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1, 1}), cond_param, {0, 0}, {1, 1},
{1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1, 1}), cond_param, {1, 1}, {2, 2},
{1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, shape, "body_param"));
auto body_transpose = builder_body.AddInstruction(
HloInstruction::CreateTranspose(shape, body_param, {0, 1}));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
builder_f.AddInstruction(HloInstruction::CreateTranspose(shape, a_f, {0, 1}));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto body_fusion = builder_body.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {body_transpose}, comp_f));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(shape, cond, body, add));
auto dot = builder.AddInstruction(CreateDot(shape, while_hlo, while_hlo));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_FALSE(OutputsBF16(add));
EXPECT_FALSE(OutputsBF16(body_fusion));
EXPECT_FALSE(OutputsBF16(body_param));
EXPECT_FALSE(OutputsBF16(body_transpose));
EXPECT_FALSE(OutputsBF16(a_f));
}
TEST_F(BFloat16PropagationTest, PropagateThroughTupleWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, tuple->shape(), "cond_param"));
auto cond_lhs = builder_cond.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond_param, 0));
auto cond_rhs = builder_cond.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond_param, 1));
auto cond_add_rhs = builder_cond.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, cond_rhs, cond_rhs));
auto cond_dot =
builder_cond.AddInstruction(CreateDot(shape, cond_lhs, cond_add_rhs));
builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, tuple->shape(), "body_param"));
auto body_lhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
auto body_rhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 1));
auto body_dot1 =
builder_body.AddInstruction(CreateDot(shape, body_lhs, body_rhs));
auto body_dot2 =
builder_body.AddInstruction(CreateDot(shape, body_rhs, body_lhs));
auto body_transpose = builder_body.AddInstruction(
HloInstruction::CreateTranspose(shape, body_dot2, {0, 1}));
builder_body.AddInstruction(
HloInstruction::CreateTuple({body_dot1, body_transpose}));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(tuple->shape(), cond, body, tuple));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_hlo, 0)); |
1,851 | cpp | tensorflow/tensorflow | hlo_replication_analysis | third_party/xla/xla/service/hlo_replication_analysis.cc | third_party/xla/xla/service/hlo_replication_analysis_test.cc | #ifndef XLA_SERVICE_HLO_REPLICATION_ANALYSIS_H_
#define XLA_SERVICE_HLO_REPLICATION_ANALYSIS_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
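// Analysis that determines, for each HLO instruction and shape index, whether
// the value is known to be identical across replicas (or across partitions
// when cross_partition_spmd is true).
//
// A minimal usage sketch (assuming a valid `const HloModule* module` and
// `const HloInstruction* inst`; error handling elided):
//
//   TF_ASSIGN_OR_RETURN(
//       std::unique_ptr<HloReplicationAnalysis> analysis,
//       HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/false));
//   bool replicated = analysis->HloInstructionIsReplicatedAt(inst, {});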
class HloReplicationAnalysis {
public:
static absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>> Run(
const HloModule* module, bool cross_partition_spmd);
static absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>> Run(
const HloModule* module, bool cross_partition_spmd,
const absl::flat_hash_set<const HloInstruction*>*
loops_known_with_same_iterations);
static absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
RunWithPartialReplication(const HloModule* module, bool cross_partition_spmd);
bool HloInstructionIsReplicatedAt(const HloInstruction* inst,
const ShapeIndex& index) const;
bool HloInstructionIsReplicatedAt(
const HloInstruction* inst, const ShapeIndex& index,
absl::Span<const ReplicaGroup> replica_groups) const;
private:
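  // Lattice value describing how a piece of data is shared across devices:
  // replicated on all devices, unique per device, or partially replicated over
  // explicit device sets. Partial replication is stored as a map from each
  // device id to the smallest device id in its set (device_set_root_).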
class HloReplication {
public:
static HloReplication ReplicatedOnAllDevices();
static HloReplication UniqueOnAllDevices();
static HloReplication PartiallyReplicated(
absl::Span<const absl::Span<const int64_t>> device_sets);
HloReplication();
HloReplication(const HloReplication& other) = default;
HloReplication(HloReplication&& other) = default;
HloReplication& operator=(HloReplication&& other) = default;
HloReplication Merge(const HloReplication& other) const;
bool Equal(const HloReplication& other) const;
bool IsReplicatedOnAllDevices() const;
bool IsUniqueOnAllDevices() const;
bool IsReplicatedWithinSubgroup(absl::Span<const int64_t> device_ids) const;
std::string ToString() const;
private:
enum class State {
kReplicatedOnAllDevices = 0,
kUniqueOnAllDevices = 1,
kPartiallyReplicated = 2,
};
explicit HloReplication(State state,
absl::Span<const int64_t> device_set_root);
State state_;
std::vector<int64_t> device_set_root_;
};
static HloReplication DetermineHloInstructionIsReplicated(
const HloInstruction* hlo, const ShapeIndex& index,
bool cross_partition_spmd,
const absl::flat_hash_map<const HloInstruction*,
ShapeTree<HloReplication>>& hlo_replication,
bool support_partial_replication);
HloReplicationAnalysis(const HloModule* module, bool cross_partition_spmd,
const absl::flat_hash_set<const HloInstruction*>*
loops_known_with_same_iterations,
bool support_partial_replication)
: module_(module),
cross_partition_spmd_(cross_partition_spmd),
loops_known_with_same_iterations_(*loops_known_with_same_iterations),
support_partial_replication_(support_partial_replication) {}
absl::Status ComputeHloReplication();
bool ComputeHloReplicationOnComputation(const HloComputation* computation,
bool mark_everything_not_replicated);
const HloModule* module_;
bool cross_partition_spmd_;
const absl::flat_hash_set<const HloInstruction*>&
loops_known_with_same_iterations_;
const bool support_partial_replication_;
absl::flat_hash_map<const HloInstruction*, ShapeTree<HloReplication>>
hlo_replication_;
};
}
#endif
#include "xla/service/hlo_replication_analysis.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
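// Determines the replication of `hlo` at `index` from its operands and its own
// semantics: all-reduce/all-gather are analyzed through their replica groups
// and channel/global-id settings, replica-id is replicated across partitions
// but not across replicas (and vice versa for partition-id), constants are
// always replicated, and most elementwise/data-movement ops simply merge their
// operands' replication.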
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::DetermineHloInstructionIsReplicated(
const HloInstruction* hlo, const ShapeIndex& index,
bool cross_partition_spmd,
const absl::flat_hash_map<const HloInstruction*, ShapeTree<HloReplication>>&
hlo_replication,
bool support_partial_replication) {
const auto merge_operand_replication = [&hlo_replication](
const HloInstruction* inst) {
HloReplication replication = HloReplication::ReplicatedOnAllDevices();
for (auto operand : inst->operands()) {
auto operand_it = hlo_replication.find(operand);
if (operand_it == hlo_replication.end()) {
replication = replication.Merge(HloReplication::UniqueOnAllDevices());
} else {
replication = replication.Merge(operand_it->second.element({}));
}
}
return replication;
};
if (hlo->opcode() == HloOpcode::kAllReduce ||
hlo->opcode() == HloOpcode::kAllGather) {
HloReplication replication = merge_operand_replication(hlo);
if (replication.IsReplicatedOnAllDevices()) {
return replication;
}
if (!hlo->channel_id().has_value()) {
if (cross_partition_spmd) {
return replication;
}
if (hlo->replica_groups().empty() || hlo->replica_groups().size() == 1) {
return HloReplication::ReplicatedOnAllDevices();
}
if (support_partial_replication) {
std::vector<absl::Span<const int64_t>> device_sets;
for (const ReplicaGroup& replica_group : hlo->replica_groups()) {
device_sets.push_back(replica_group.replica_ids());
}
return HloReplication::PartiallyReplicated(device_sets);
} else {
return HloReplication::UniqueOnAllDevices();
}
} else {
bool global_id;
if (hlo->opcode() == HloOpcode::kAllReduce) {
global_id = Cast<HloAllReduceInstruction>(hlo)->use_global_device_ids();
} else {
global_id = Cast<HloAllGatherInstruction>(hlo)->use_global_device_ids();
}
if (global_id) {
bool replicated_across_partitions = true;
bool replicated_across_replicas = true;
const int64_t num_partitions =
hlo->GetModule()->config().num_partitions();
absl::flat_hash_set<int64_t> visited_partitions;
absl::flat_hash_set<int64_t> visited_replicas;
for (const auto& group : hlo->replica_groups()) {
visited_partitions.clear();
visited_replicas.clear();
visited_replicas.reserve(group.replica_ids().size());
visited_partitions.reserve(group.replica_ids().size());
for (int64_t id : group.replica_ids()) {
int64_t rid = id / num_partitions;
int64_t pid = id % num_partitions;
visited_partitions.insert(pid);
visited_replicas.insert(rid);
}
replicated_across_partitions &=
visited_partitions.size() == num_partitions;
replicated_across_replicas &=
visited_replicas.size() ==
hlo->GetModule()->config().replica_count();
}
if ((cross_partition_spmd && replicated_across_partitions) ||
(!cross_partition_spmd && replicated_across_replicas)) {
return HloReplication::ReplicatedOnAllDevices();
} else {
return HloReplication::UniqueOnAllDevices();
}
}
if (cross_partition_spmd) {
return HloReplication::ReplicatedOnAllDevices();
}
if (hlo->replica_groups().empty() || hlo->replica_groups().size() == 1) {
return HloReplication::ReplicatedOnAllDevices();
} else {
return HloReplication::UniqueOnAllDevices();
}
}
}
if (hlo->HasSideEffectNoRecurse()) {
return HloReplication::UniqueOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kReplicaId) {
return cross_partition_spmd ? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kPartitionId) {
return cross_partition_spmd ? HloReplication::UniqueOnAllDevices()
: HloReplication::ReplicatedOnAllDevices();
}
auto it = hlo_replication.find(hlo);
if (hlo->opcode() == HloOpcode::kParameter) {
CHECK(it != hlo_replication.end());
return it->second.element(index);
}
if (it != hlo_replication.end() &&
it->second.element(index).IsUniqueOnAllDevices()) {
return it->second.element(index);
}
if (hlo->opcode() == HloOpcode::kConstant) {
return HloReplication::ReplicatedOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kCustomCall &&
(hlo->custom_call_target() == "X64SplitLow" ||
hlo->custom_call_target() == "X64SplitHigh" ||
hlo->custom_call_target() == "X64Combine")) {
return merge_operand_replication(hlo);
}
if (support_partial_replication) {
if (hlo->opcode() == HloOpcode::kDynamicSlice) {
const HloInstruction* ds_buffer = hlo->operand(0);
if (hlo->dynamic_slice_sizes().size() == 1 &&
hlo->dynamic_slice_sizes()[0] == 1 &&
ds_buffer->opcode() == HloOpcode::kConstant &&
ds_buffer->shape().rank() == 1 &&
ds_buffer->shape().element_type() == PrimitiveType::S32 &&
((cross_partition_spmd &&
hlo->operand(1)->opcode() == HloOpcode::kPartitionId) ||
(!cross_partition_spmd &&
hlo->operand(1)->opcode() == HloOpcode::kReplicaId))) {
const HloModule* hlo_module = hlo->GetModule();
int64_t num_devices = cross_partition_spmd
? hlo_module->config().num_partitions()
: hlo_module->config().replica_count();
absl::flat_hash_map<int64_t, std::vector<int64_t>> value_to_device_set;
for (int64_t device_id = 0; device_id < num_devices; ++device_id) {
std::optional<int64_t> value =
ds_buffer->literal().GetIntegralAsS64({device_id});
value_to_device_set[*value].push_back(device_id);
}
std::vector<absl::Span<const int64_t>> device_sets;
for (const auto& value_and_device_set : value_to_device_set) {
device_sets.push_back(
absl::Span<const int64_t>(value_and_device_set.second));
}
return HloReplication::PartiallyReplicated(device_sets);
}
}
}
if (hlo->IsElementwise() ||
hlo->opcode() == HloOpcode::kConcatenate ||
hlo->opcode() == HloOpcode::kConvolution ||
hlo->opcode() == HloOpcode::kDot ||
hlo->opcode() == HloOpcode::kReduce ||
hlo->opcode() == HloOpcode::kBroadcast ||
hlo->opcode() == HloOpcode::kTranspose ||
hlo->opcode() == HloOpcode::kReshape ||
hlo->opcode() == HloOpcode::kBitcast ||
hlo->opcode() == HloOpcode::kReverse ||
hlo->opcode() == HloOpcode::kGather ||
hlo->opcode() == HloOpcode::kScatter ||
hlo->opcode() == HloOpcode::kIota ||
hlo->opcode() == HloOpcode::kPad ||
hlo->opcode() == HloOpcode::kSlice ||
hlo->opcode() == HloOpcode::kDynamicSlice ||
hlo->opcode() == HloOpcode::kDynamicUpdateSlice ||
hlo->opcode() == HloOpcode::kReduceWindow ||
hlo->opcode() == HloOpcode::kCopy) {
return merge_operand_replication(hlo);
}
return HloReplication::UniqueOnAllDevices();
}
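// Propagates replication state through `computation` in post order. Control
// flow is handled explicitly: while loops are iterated to a fixed point over
// their condition and body, calls/fusions/conditionals forward state into
// their called computations' parameters and back out through their roots, and
// tuples/GTEs copy the relevant subtrees. Returns true if any state changed.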
bool HloReplicationAnalysis::ComputeHloReplicationOnComputation(
const HloComputation* computation, bool mark_everything_not_replicated) {
bool changed = false;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
auto assign_or_combine_shapetree =
[&](ShapeTree<HloReplication>&& to_combine,
const HloInstruction* dest) {
auto it = hlo_replication_.find(dest);
if (it == hlo_replication_.end()) {
hlo_replication_[dest] = std::move(to_combine);
return true;
}
bool updated = false;
it->second.ForEachMutableElement(
[&](const ShapeIndex& index, HloReplication* element) {
HloReplication new_replication =
element->Merge(to_combine.element(index));
if (!element->Equal(new_replication)) {
*element = std::move(new_replication);
updated = true;
}
});
return updated;
};
auto propagate_shapetree = [&](const HloInstruction* source,
const HloInstruction* dest) {
auto source_it = hlo_replication_.find(source);
if (source_it == hlo_replication_.end()) {
return false;
}
return assign_or_combine_shapetree(
ShapeTree<HloReplication>(source_it->second), dest);
};
if (inst->opcode() == HloOpcode::kWhile) {
while (true) {
bool updated = propagate_shapetree(
inst->operand(0),
inst->while_condition()->parameter_instruction(0));
updated |= propagate_shapetree(
inst->while_body()->root_instruction(),
inst->while_condition()->parameter_instruction(0));
updated |= propagate_shapetree(
inst->operand(0), inst->while_body()->parameter_instruction(0));
updated |=
propagate_shapetree(inst->while_body()->root_instruction(),
inst->while_body()->parameter_instruction(0));
updated |= ComputeHloReplicationOnComputation(
inst->while_condition(), mark_everything_not_replicated);
if (!ContainsKey(loops_known_with_same_iterations_, inst) &&
!hlo_replication_[inst->while_condition()->root_instruction()]
.element({})
.IsReplicatedOnAllDevices()) {
updated |= ComputeHloReplicationOnComputation(
            inst->while_body(), /*mark_everything_not_replicated=*/true);
} else {
updated |= ComputeHloReplicationOnComputation(
inst->while_body(), mark_everything_not_replicated);
}
if (!updated) {
break;
}
changed = true;
}
changed |= propagate_shapetree(inst->operand(0), inst);
changed |=
propagate_shapetree(inst->while_body()->root_instruction(), inst);
} else if (inst->opcode() == HloOpcode::kCall ||
inst->opcode() == HloOpcode::kFusion) {
auto called = inst->called_computations().front();
for (int64_t i = 0; i < inst->operand_count(); ++i) {
changed |= propagate_shapetree(inst->operand(i),
called->parameter_instruction(i));
}
changed |= ComputeHloReplicationOnComputation(
called, mark_everything_not_replicated);
changed |= propagate_shapetree(called->root_instruction(), inst);
} else if (inst->opcode() == HloOpcode::kConditional) {
for (int64_t i = 0; i < inst->called_computations().size(); ++i) {
changed |= propagate_shapetree(
inst->operand(i + 1),
inst->called_computations()[i]->parameter_instruction(0));
}
if (!hlo_replication_[inst->operand(0)]
.element({})
.IsReplicatedOnAllDevices()) {
for (auto called : inst->called_computations()) {
changed |= ComputeHloReplicationOnComputation(
called,
              /*mark_everything_not_replicated=*/true);
}
changed |= assign_or_combine_shapetree(
ShapeTree<HloReplication>(inst->shape(),
HloReplication::UniqueOnAllDevices()),
inst);
} else {
for (auto called : inst->called_computations()) {
changed |= ComputeHloReplicationOnComputation(
called, mark_everything_not_replicated);
changed |= propagate_shapetree(called->root_instruction(), inst);
}
}
} else if (inst->opcode() == HloOpcode::kTuple) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
for (int64_t i = 0; i < inst->operand_count(); ++i) {
shape_tree.CopySubtreeFrom(hlo_replication_[inst->operand(i)], {}, {i});
}
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kOptimizationBarrier) {
ShapeTree<HloReplication> shape_tree = hlo_replication_[inst->operand(0)];
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kGetTupleElement) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
shape_tree.CopySubtreeFrom(hlo_replication_[inst->operand(0)],
{inst->tuple_index()}, {});
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kInfeed && cross_partition_spmd_) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::UniqueOnAllDevices());
if (inst->has_sharding()) {
auto sharding = inst->sharding().GetAsShapeTree(inst->shape());
shape_tree.ForEachMutableElement(
[&sharding](const ShapeIndex& index, HloReplication* data) {
*data = sharding.element(index).IsReplicated()
? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
});
}
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else {
if (mark_everything_not_replicated) {
changed |= assign_or_combine_shapetree(
ShapeTree<HloReplication>(inst->shape(),
HloReplication::UniqueOnAllDevices()),
inst);
} else {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
ShapeUtil::ForEachSubshape(
inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
*shape_tree.mutable_element(index) =
DetermineHloInstructionIsReplicated(
inst, index, cross_partition_spmd_, hlo_replication_,
support_partial_replication_);
});
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
}
}
}
return changed;
}
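// Seeds the analysis at the entry computation's parameters: leaves default to
// unique, parameter sharding (in cross-partition SPMD mode) and
// parameter_replicated_at_leaf_buffers upgrade or downgrade individual leaves,
// and then replication is propagated through the entry computation.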
absl::Status HloReplicationAnalysis::ComputeHloReplication() {
auto entry = module_->entry_computation();
for (int i = 0; i < entry->num_parameters(); ++i) {
auto param = entry->parameter_instruction(i);
ShapeTree<HloReplication> shape_tree(param->shape(),
HloReplication::UniqueOnAllDevices());
const auto& replication = param->parameter_replicated_at_leaf_buffers();
int leaf_index = 0;
absl::Status status = ShapeUtil::ForEachSubshapeWithStatus(
param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(param->shape(), index)) {
return absl::OkStatus();
}
if (cross_partition_spmd_ && param->has_sharding()) {
TF_ASSIGN_OR_RETURN(auto sharding_tree,
param->sharding().AsShapeTree(param->shape()));
*shape_tree.mutable_element(index) =
sharding_tree.element(index).IsReplicated()
? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
}
if (replication) {
if (!cross_partition_spmd_ && (*replication)[leaf_index]) {
*shape_tree.mutable_element(index) =
HloReplication::ReplicatedOnAllDevices();
}
if (cross_partition_spmd_ && !(*replication)[leaf_index]) {
*shape_tree.mutable_element(index) =
HloReplication::UniqueOnAllDevices();
}
++leaf_index;
}
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
hlo_replication_[param] = std::move(shape_tree);
}
ComputeHloReplicationOnComputation(entry,
                                     /*mark_everything_not_replicated=*/false);
return absl::OkStatus();
}
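// Query helpers. An instruction that never received a replication state is
// conservatively reported as not replicated. The overload taking replica
// groups also accepts values that are replicated within every queried group.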
bool HloReplicationAnalysis::HloInstructionIsReplicatedAt(
const HloInstruction* inst, const ShapeIndex& index) const {
auto it = hlo_replication_.find(inst);
if (it == hlo_replication_.end()) {
return false;
}
return it->second.element(index).IsReplicatedOnAllDevices();
}
bool HloReplicationAnalysis::HloInstructionIsReplicatedAt(
const HloInstruction* inst, const ShapeIndex& index,
absl::Span<const ReplicaGroup> replica_groups) const {
auto it = hlo_replication_.find(inst);
if (it == hlo_replication_.end()) {
return false;
}
VLOG(5) << "HloInstructionIsReplicatedAt is called on " << inst->name()
<< ", index: " << index.ToString()
<< ", replication: " << it->second.element(index).ToString();
if (replica_groups.empty()) {
return it->second.element(index).IsReplicatedOnAllDevices();
}
if (it->second.element(index).IsReplicatedOnAllDevices()) {
return true;
}
if (it->second.element(index).IsUniqueOnAllDevices()) {
return false;
}
for (const ReplicaGroup& replica_group : replica_groups) {
if (!it->second.element(index).IsReplicatedWithinSubgroup(
replica_group.replica_ids())) {
return false;
}
}
return true;
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::Run(const HloModule* module,
bool cross_partition_spmd) {
const absl::flat_hash_set<const HloInstruction*> empty;
return Run(module, cross_partition_spmd, &empty);
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::Run(const HloModule* module, bool cross_partition_spmd,
const absl::flat_hash_set<const HloInstruction*>*
loops_known_with_same_iterations) {
auto analysis = absl::WrapUnique(new HloReplicationAnalysis(
module, cross_partition_spmd, loops_known_with_same_iterations,
      /*support_partial_replication=*/false));
TF_RETURN_IF_ERROR(analysis->ComputeHloReplication());
return analysis;
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::RunWithPartialReplication(const HloModule* module,
bool cross_partition_spmd) {
const absl::flat_hash_set<const HloInstruction*> empty;
auto analysis = absl::WrapUnique(
new HloReplicationAnalysis(module, cross_partition_spmd, &empty,
                                 /*support_partial_replication=*/true));
TF_RETURN_IF_ERROR(analysis->ComputeHloReplication());
return analysis;
}
HloReplicationAnalysis::HloReplication::HloReplication()
: state_(State::kReplicatedOnAllDevices) {}
HloReplicationAnalysis::HloReplication::HloReplication(
HloReplicationAnalysis::HloReplication::State state,
absl::Span<const int64_t> device_set_root)
: state_(state),
device_set_root_(device_set_root.begin(), device_set_root.end()) {
CHECK(state == State::kPartiallyReplicated || device_set_root_.empty());
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::ReplicatedOnAllDevices() {
return HloReplication(State::kReplicatedOnAllDevices, {});
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::UniqueOnAllDevices() {
return HloReplication(State::kUniqueOnAllDevices, {});
}
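// Encodes a partial replication: every device id is mapped to the smallest
// device id of the set it belongs to, so two devices hold the same value
// exactly when they map to the same root.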
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::PartiallyReplicated(
absl::Span<const absl::Span<const int64_t>> device_sets) {
int64_t max_device_id = 0;
for (const absl::Span<const int64_t>& device_set : device_sets) {
for (int64_t device_id : device_set) {
max_device_id = std::max(max_device_id, device_id);
}
}
std::vector<int64_t> device_set_root;
device_set_root.resize(max_device_id + 1);
for (const absl::Span<const int64_t>& device_set : device_sets) {
int64_t min_device_id = *absl::c_min_element(device_set);
for (int64_t device_id : device_set) {
device_set_root[device_id] = min_device_id;
}
}
return HloReplication(State::kPartiallyReplicated, device_set_root);
}
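// Merge computes the meet of two replication states: fully replicated is the
// identity, unique absorbs everything, and two partial replications intersect
// their device sets by pairing up the per-device roots.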
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::Merge(
const HloReplication& other) const {
switch (state_) {
case State::kReplicatedOnAllDevices:
return other;
case State::kUniqueOnAllDevices:
return *this;
case State::kPartiallyReplicated: {
switch (other.state_) {
case State::kReplicatedOnAllDevices:
return *this;
case State::kUniqueOnAllDevices:
return other;
case State::kPartiallyReplicated: {
absl::flat_hash_map<int64_t, std::vector<int64_t>>
value_to_device_set;
size_t num_devices = device_set_root_.size();
for (int64_t device_id = 0; device_id < num_devices; ++device_id) {
int64_t new_value = device_set_root_[device_id] * num_devices +
other.device_set_root_[device_id];
value_to_device_set[new_value].push_back(device_id);
}
CHECK_LE(value_to_device_set.size(), num_devices);
if (value_to_device_set.size() == 1) {
return ReplicatedOnAllDevices();
} else if (value_to_device_set.size() < num_devices) {
std::vector<absl::Span<const int64_t>> device_sets;
for (const auto& value_and_device_set : value_to_device_set) {
device_sets.push_back(
absl::Span<const int64_t>(value_and_device_set.seco | #include "xla/service/hlo_replication_analysis.h"
#include <memory>
#include <string>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloReplicationAnalysisTest : public HloTestBase {};
TEST_F(HloReplicationAnalysisTest, NoControlFlow) {
const std::string module_str = R"(
HloModule NoControlFlow
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
sum.u32 {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY entry {
param = (f32[4096,4096]{1,0}, f32[4096,4096]{1,0}) parameter(0)
get-tuple-element.2 = f32[4096,4096]{1,0} get-tuple-element(param), index=0
get-tuple-element.3 = f32[4096,4096]{1,0} get-tuple-element(param), index=1
after-all.1 = token[] after-all()
replica-id = u32[] replica-id()
infeed = (f32[4096,4096]{1,0}, token[]) infeed(after-all.1)
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed), index=0
dot = f32[4096,4096]{1,0} dot(get-tuple-element.5, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
all-reduce = f32[4096,4096]{1,0} all-reduce(dot), replica_groups={},
to_apply=sum
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.3, all-reduce)
all-reduce-partitions = u32[] all-reduce(replica-id), channel_id=1,
to_apply=sum.u32, replica_groups={{0},{1},{2},{3}}
all-reduce-subgroup = u32[] all-reduce(replica-id),
replica_groups={{0,1},{2,3}}, to_apply=sum.u32
ROOT add = f32[4096,4096]{1,0} add(get-tuple-element.2, subtract)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/4));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{false, true});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.3"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "replica-id"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-subgroup"), {}));
}
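// Analysis with cross_partition_spmd=true: replica-id and cross-partition
// (channel_id) all-reduces are uniform across partitions, while partition-id,
// maximal-device-sharded data, and cross-replica all-reduces are not.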
TEST_F(HloReplicationAnalysisTest, NoControlFlowSPMD) {
const std::string module_str = R"(
HloModule NoControlFlow
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
sum.u32 {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY entry {
param = (f32[4096,4096]{1,0}, f32[4096,4096]{1,0}, f32[4096,4096]{1,0})
parameter(0), sharding={{maximal device=0}, {replicated}, {replicated}}
get-tuple-element.2 = f32[4096,4096]{1,0} get-tuple-element(param), index=0
get-tuple-element.3 = f32[4096,4096]{1,0} get-tuple-element(param), index=1
get-tuple-element.4 = f32[4096,4096]{1,0} get-tuple-element(param), index=2
after-all.1 = token[] after-all()
replica-id = u32[] replica-id()
partition-id = u32[] partition-id()
infeed = ((f32[4096,4096]{1,0}, f32[8,8]{1,0}), token[]) infeed(after-all.1),
sharding={{maximal device=0}, {replicated}, {maximal device=0}}
infeed-data = (f32[4096,4096]{1,0}, f32[8,8]{1,0}) get-tuple-element(infeed),
index=0
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed-data),
index=0
get-tuple-element.6 = f32[8,8]{1,0} get-tuple-element(infeed-data), index=1
dot = f32[4096,4096]{1,0} dot(get-tuple-element.5, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot.2 = f32[4096,4096]{1,0} dot(get-tuple-element.4, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
all-reduce = f32[4096,4096]{1,0} all-reduce(dot), replica_groups={},
to_apply=sum
all-reduce.2 = f32[4096,4096]{1,0} all-reduce(dot.2), replica_groups={},
to_apply=sum
all-reduce-subgroup = f32[4096,4096]{1,0} all-reduce(dot),
replica_groups={{0,1},{2,3}}, to_apply=sum
all-reduce-partitions = f32[4096,4096]{1,0} all-reduce(get-tuple-element.2),
channel_id=1, to_apply=sum
all-reduce-partitions.2 = f32[4096,4096]{1,0} all-reduce(get-tuple-element.4),
channel_id=1, to_apply=sum
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.3,
all-reduce-partitions)
subtract.2 = f32[4096,4096]{1,0} subtract(get-tuple-element.3,
all-reduce-partitions.2)
all-reduce-same-operand = u32[] all-reduce(replica-id), to_apply=sum.u32
all-reduce-same-operand-subgroup = u32[] all-reduce(replica-id),
replica_groups={{0,1},{2,3}}, to_apply=sum.u32
all-reduce-different-operand = u32[] all-reduce(partition-id),
to_apply=sum.u32
add = f32[4096,4096]{1,0} add(get-tuple-element.2, subtract)
ROOT add.2 = f32[4096,4096]{1,0} add(get-tuple-element.4, subtract.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/4));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{false, true, false});
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloReplicationAnalysis> analysis,
      HloReplicationAnalysis::Run(module.get(), /*cross_partition_spmd=*/true));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.3"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.4"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.6"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot.2"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract.2"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "replica-id"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "partition-id"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-same-operand"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-same-operand-subgroup"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-different-operand"), {}));
}
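// Replication information propagates through call and fusion instructions
// into the called computations and back out per tuple element.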
TEST_F(HloReplicationAnalysisTest, NestedCall) {
const std::string module_str = R"(
HloModule NestedCall
fusion_computation {
fusion_p0 = f32[] parameter(0)
fusion_p1 = f32[] parameter(1)
add = f32[] add(fusion_p0, fusion_p0)
multiply = f32[] multiply(add, fusion_p1)
ROOT tuple = (f32[], f32[]) tuple(add, multiply)
}
call_body {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT fusion = (f32[], f32[]) fusion(a, b), kind=kLoop, calls=fusion_computation
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ROOT call = (f32[], f32[]) call(get-tuple-element, get-tuple-element.1), to_apply=call_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, false});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "multiply"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "fusion"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "fusion"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "call"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "call"), {1}));
}
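// A while body that preserves replication of both tuple elements keeps the
// loop result replicated at every index.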
TEST_F(HloReplicationAnalysisTest, SimpleWhileLoop) {
const std::string module_str = R"(
HloModule SimpleWhileLoop
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
constant.1 = u32[] constant(1)
add = u32[] add(get-tuple-element.6, constant.1)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(multiply, add)
}
ENTRY SimpleWhileLoop {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
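// When the loop body overwrites a replicated element with infeed-derived
// data, that element of the while result becomes non-replicated.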
TEST_F(HloReplicationAnalysisTest,
WhileLoopParameterAliasingNonReplicatedOutput) {
const std::string module_str = R"(
HloModule WhileLoopParameterAliasingNonReplicatedOutput
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
after-all.1 = token[] after-all()
infeed = (f32[4096,4096]{1,0}, token[]) infeed(after-all.1)
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed), index=0
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.5, multiply)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
constant.1 = u32[] constant(1)
add = u32[] add(get-tuple-element.6, constant.1)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(subtract, add)
}
ENTRY WhileLoopParameterAliasingNonReplicatedOutput {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "multiply"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
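// A loop counter that accumulates replica-id makes the loop condition depend
// on a non-replicated value, so the entire loop state is non-replicated.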
TEST_F(HloReplicationAnalysisTest, WhileLoopDifferentCondition) {
const std::string module_str = R"(
HloModule WhileLoopDifferentCondition
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
replica-id = u32[] replica-id()
add = u32[] add(get-tuple-element.6, replica-id)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(multiply, add)
}
ENTRY WhileLoopDifferentCondition {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
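// Conditional with a replicated branch index: each tuple element of the
// result merges that element's replication across all branches.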
TEST_F(HloReplicationAnalysisTest, SimpleConditional) {
const std::string module_str = R"(
HloModule SimpleConditional
Negate {
x = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(x), index=0
negate = f32[] negate(get-tuple-element)
get-tuple-element.1 = f32[] get-tuple-element(x), index=1
negate.1 = f32[] negate(get-tuple-element.1)
ROOT tuple = (f32[], f32[]) tuple(negate, negate.1)
}
Identity {
ROOT y = (f32[], f32[]) parameter(0)
}
Floor {
z = (f32[], f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(z), index=0
floor = f32[] floor(get-tuple-element.2)
get-tuple-element.3 = f32[] get-tuple-element(z), index=1
floor.1 = f32[] floor(get-tuple-element.3)
ROOT tuple.1 = (f32[], f32[]) tuple(floor, floor.1)
}
ENTRY entry {
param = ((f32[], f32[]), (f32[], f32[]), (f32[], f32[]), s32[]) parameter(0)
get-tuple-element.4 = (f32[], f32[]) get-tuple-element(param), index=0
get-tuple-element.5 = (f32[], f32[]) get-tuple-element(param), index=1
get-tuple-element.6 = (f32[], f32[]) get-tuple-element(param), index=2
get-tuple-element.7 = s32[] get-tuple-element(param), index=3
ROOT conditional = (f32[], f32[]) conditional(get-tuple-element.7, get-tuple-element.4, get-tuple-element.5, get-tuple-element.6), branch_computations={Negate, Identity, Floor}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true, true, true, false, true, true});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {1}));
}
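// If the branch index depends on replica-id, nothing produced by the
// conditional can be considered replicated.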
TEST_F(HloReplicationAnalysisTest, ConditionalWithDifferentPredicates) {
const std::string module_str = R"(
HloModule ConditionalWithDifferentPredicates
Negate {
x = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(x), index=0
negate = f32[] negate(get-tuple-element)
get-tuple-element.1 = f32[] get-tuple-element(x), index=1
negate.1 = f32[] negate(get-tuple-element.1)
ROOT tuple = (f32[], f32[]) tuple(negate, negate.1)
}
Identity {
ROOT y = (f32[], f32[]) parameter(0)
}
Floor {
z = (f32[], f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(z), index=0
floor = f32[] floor(get-tuple-element.2)
get-tuple-element.3 = f32[] get-tuple-element(z), index=1
floor.1 = f32[] floor(get-tuple-element.3)
ROOT tuple.1 = (f32[], f32[]) tuple(floor, floor.1)
}
ENTRY entry {
param = ((f32[], f32[]), (f32[], f32[]), (f32[], f32[])) parameter(0)
get-tuple-element.4 = (f32[], f32[]) get-tuple-element(param), index=0
get-tuple-element.5 = (f32[], f32[]) get-tuple-element(param), index=1
get-tuple-element.6 = (f32[], f32[]) get-tuple-element(param), index=2
replica-id = u32[] replica-id()
id = s32[] bitcast-convert(replica-id)
ROOT conditional = (f32[], f32[]) conditional(id, get-tuple-element.4,
get-tuple-element.5, get-tuple-element.6),
branch_computations={Negate, Identity, Floor}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true, true, true, true, true});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {1}));
}
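// X64SplitLow/X64SplitHigh/X64Combine custom calls preserve the replication
// of their operands.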
TEST_F(HloReplicationAnalysisTest, X64SplitCombine) {
const std::string module_str = R"(
HloModule SimpleX64SplitCombine
ENTRY entry {
param = (f64[]) parameter(0)
gte = f64[] get-tuple-element(param), index=0
param-low = f32[] custom-call(gte), custom_call_target="X64SplitLow"
param-high = f32[] custom-call(gte), custom_call_target="X64SplitHigh"
ROOT result-combine = f64[] custom-call(param-low, param-high), custom_call_target="X64Combine"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(absl::Span<const bool>{true});
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "param-low"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "param-high"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "result-combine"), {}));
}
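// An all-reduce whose single group spans all replicas produces a replicated
// value; singleton replica groups do not.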
TEST_F(HloReplicationAnalysisTest, CrossModuleAndReplicaAllReduce) {
const std::string module_str = R"(
HloModule CrossModuleAndReplicaAllReduce
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element.0 = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ar0 = f32[] all-reduce(get-tuple-element.0), to_apply=sum, replica_groups={{0,1}}
ar1 = f32[] all-reduce(get-tuple-element.1), to_apply=sum, replica_groups={{0},{1}}
ROOT tuple = (f32[], f32[]) tuple(ar0, ar1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/2));
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ar0"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ar1"), {}));
}
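// With use_global_device_ids, whether an all-gather result is replicated
// across replicas or across partitions depends on how its groups align with
// the replica and partition axes of the flattened device ids.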
TEST_F(HloReplicationAnalysisTest, GlobalIdAllGather) {
const std::string module_str = R"(
HloModule GlobalIdAllGather
ENTRY entry {
param = f32[1] parameter(0)
ag1 = f32[2] all-gather(param), replica_groups={{0,1},{2,3}}, dimensions={0},
use_global_device_ids=true, channel_id=1
ag2 = f32[2] all-gather(param), replica_groups={{0,2},{1,3}}, dimensions={0},
use_global_device_ids=true, channel_id=2
ag3 = f32[4] all-gather(param), replica_groups={{0,1,2,3}}, dimensions={0},
use_global_device_ids=true, channel_id=3
ROOT tuple = (f32[2], f32[2], f32[4]) tuple(ag1, ag2, ag3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2,
                                                /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloReplicationAnalysis> replica_analysis,
      HloReplicationAnalysis::Run(module.get(),
                                  /*cross_partition_spmd=*/false));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloReplicationAnalysis> partition_analysis,
      HloReplicationAnalysis::Run(module.get(),
                                  /*cross_partition_spmd=*/true));
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag1"), {}));
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag2"), {}));
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag3"), {}));
EXPECT_TRUE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag1"), {}));
EXPECT_FALSE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag2"), {}));
EXPECT_TRUE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag3"), {}));
}
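// A dynamic-slice indexed by replica-id is not globally replicated, but the
// partial-replication analysis proves it equal within replica groups whose
// members select the same constant element.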
TEST_F(HloReplicationAnalysisTest, PartiallyReplicatedDynamicSlice) {
const std::string module_str = R"(
HloModule PartiallyReplicatedDynamicSlice
ENTRY entry {
constant = s32[8] constant({1, 3, 9, 10, 1, 3, 9, 10})
replica-id = u32[] replica-id()
ROOT dynamic-slice = s32[1] dynamic-slice(constant, replica-id), dynamic_slice_sizes={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/8,
                                                /*num_partitions=*/1));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloReplicationAnalysis> replica_analysis,
      HloReplicationAnalysis::RunWithPartialReplication(
          module.get(),
          /*cross_partition_spmd=*/false));
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}));
std::vector<ReplicaGroup> replica_groups(4);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(4);
replica_groups[1].add_replica_ids(1);
replica_groups[1].add_replica_ids(5);
replica_groups[2].add_replica_ids(2);
replica_groups[2].add_replica_ids(6);
replica_groups[3].add_replica_ids(3);
replica_groups[3].add_replica_ids(7);
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}, replica_groups));
std::vector<ReplicaGroup> replica_groups_2(2);
replica_groups_2[0].add_replica_ids(0);
replica_groups_2[0].add_replica_ids(1);
replica_groups_2[0].add_replica_ids(2);
replica_groups_2[0].add_replica_ids(3);
replica_groups_2[1].add_replica_ids(4);
replica_groups_2[1].add_replica_ids(5);
replica_groups_2[1].add_replica_ids(6);
replica_groups_2[1].add_replica_ids(7);
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}, replica_groups_2));
}
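// opt-barrier forwards the replication of each tuple element unchanged.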
TEST_F(HloReplicationAnalysisTest, OptimizationBarrier) {
const std::string module_str = R"(
HloModule OptimizationBarrier
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element.0 = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ar0 = f32[] all-reduce(get-tuple-element.0), to_apply=sum, replica_groups={{0,1}}
ar1 = f32[] all-reduce(get-tuple-element.1), to_apply=sum, replica_groups={{0},{1}}
tuple = (f32[], f32[]) tuple(ar0, ar1)
opt-barrier = (f32[], f32[]) opt-barrier(tuple)
gte.0 = f32[] get-tuple-element(opt-barrier), index=0
gte.1 = f32[] get-tuple-element(opt-barrier), index=1
ROOT tuple.1 = (f32[], f32[]) tuple(gte.0, gte.1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/2));
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
                          HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte.0"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte.1"), {}));
}
}
} |
1,852 | cpp | tensorflow/tensorflow | constant_value | third_party/xla/xla/service/constant_value.cc | third_party/xla/xla/service/constant_value_test.cc | #ifndef XLA_SERVICE_CONSTANT_VALUE_H_
#define XLA_SERVICE_CONSTANT_VALUE_H_
#include <string>
#include "absl/status/statusor.h"
#include "xla/literal.h"
#include "xla/util.h"
namespace xla {
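// Represents an integer constant with an explicit bitwidth and signedness.
// The value is stored in 64 bits; arithmetic wraps around at the given
// bitwidth, with two's-complement semantics for signed values.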
class ConstantValue {
public:
ConstantValue(uint64_t value, int32_t bitwidth, bool is_signed)
: value_(is_signed
? absl::bit_cast<uint64_t>(
absl::bit_cast<int64_t>(
value << (8 * sizeof(uint64_t) - bitwidth)) >>
(8 * sizeof(uint64_t) - bitwidth))
: KeepLowerBits(value, bitwidth)),
bitwidth_(bitwidth),
is_signed_(is_signed) {}
static ConstantValue GetZero(int32_t bitwidth, bool is_signed) {
return ConstantValue(0, bitwidth, is_signed);
}
static ConstantValue GetOne(int32_t bitwidth, bool is_signed) {
return ConstantValue(1, bitwidth, is_signed);
}
  static ConstantValue GetSigned(int64_t value, int32_t bitwidth) {
    return ConstantValue(absl::bit_cast<uint64_t>(value), bitwidth,
                         /*is_signed=*/true);
  }
  static ConstantValue GetUnsigned(uint64_t value, int32_t bitwidth) {
    return ConstantValue(value, bitwidth, /*is_signed=*/false);
  }
static absl::StatusOr<ConstantValue> FromLiteral(const Literal& literal);
ConstantValue add(const ConstantValue& other) const {
return ConstantValue(value_ + other.value_, bitwidth_, is_signed_);
}
ConstantValue sub(const ConstantValue& other) const {
return ConstantValue(value_ - other.value_, bitwidth_, is_signed_);
}
ConstantValue div(const ConstantValue& other) const;
ConstantValue mod(const ConstantValue& other) const;
ConstantValue mul(const ConstantValue& other) const;
bool lt(const ConstantValue& other) const;
bool gt(const ConstantValue& other) const;
bool eq(const ConstantValue& other) const { return *this == other; }
int64_t GetSignedValue() const { return absl::bit_cast<int64_t>(value_); }
uint64_t GetUnsignedValue() const { return value_; }
int32_t GetBitwidth() const { return bitwidth_; }
bool IsSigned() const { return is_signed_; }
bool operator==(const ConstantValue& other) const {
return value_ == other.value_ && bitwidth_ == other.bitwidth_ &&
is_signed_ == other.is_signed_;
}
std::string ToString() const;
private:
uint64_t value_;
int32_t bitwidth_;
bool is_signed_;
};
}
#endif
#include "xla/service/constant_value.h"
#include <string>
namespace xla {
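// Builds a ConstantValue from a scalar integral literal, preserving the
// literal's bitwidth and signedness; non-integral element types are rejected.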
absl::StatusOr<ConstantValue> ConstantValue::FromLiteral(
const Literal& literal) {
CHECK_EQ(literal.shape().dimensions_size(), 0) << "Expected scalar literal";
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<ConstantValue>>(
[&](auto primitive_type_constant) -> absl::StatusOr<ConstantValue> {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
return ConstantValue(
static_cast<uint64_t>(
literal.GetFirstElement<
primitive_util::NativeTypeOf<primitive_type_constant>>()),
primitive_util::BitWidth(primitive_type_constant),
primitive_util::IsSignedIntegralType(primitive_type_constant));
}
return InvalidArgument("Unsupported type");
},
literal.shape().element_type());
}
ConstantValue ConstantValue::div(const ConstantValue& other) const {
if (!is_signed_) {
return ConstantValue(value_ / other.value_, bitwidth_, is_signed_);
}
return ConstantValue(
absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) /
absl::bit_cast<int64_t>(other.value_)),
bitwidth_, is_signed_);
}
ConstantValue ConstantValue::mod(const ConstantValue& other) const {
if (!is_signed_) {
return ConstantValue(value_ % other.value_, bitwidth_, is_signed_);
}
return ConstantValue(
absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) %
absl::bit_cast<int64_t>(other.value_)),
bitwidth_, is_signed_);
}
ConstantValue ConstantValue::mul(const ConstantValue& other) const {
if (!is_signed_) {
return ConstantValue(value_ * other.value_, bitwidth_, is_signed_);
}
return ConstantValue(
absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) *
absl::bit_cast<int64_t>(other.value_)),
bitwidth_, is_signed_);
}
bool ConstantValue::lt(const ConstantValue& other) const {
if (!is_signed_) {
return value_ < other.value_;
}
return absl::bit_cast<int64_t>(value_) <
absl::bit_cast<int64_t>(other.value_);
}
bool ConstantValue::gt(const ConstantValue& other) const {
if (!is_signed_) {
return value_ > other.value_;
}
return absl::bit_cast<int64_t>(value_) >
absl::bit_cast<int64_t>(other.value_);
}
std::string ConstantValue::ToString() const {
return is_signed_ ? absl::StrCat(GetSignedValue())
: absl::StrCat(GetUnsignedValue());
}
} | #include "xla/service/constant_value.h"
#include <gtest/gtest.h>
#include "xla/literal_util.h"
namespace xla {
namespace {
class ConstantValueTest : public ::testing::Test {};
TEST_F(ConstantValueTest, ZeroTest32) {
ConstantValue zero = ConstantValue::GetZero(32, false);
EXPECT_EQ(zero.GetSignedValue(), 0);
EXPECT_EQ(zero.GetUnsignedValue(), 0);
EXPECT_EQ(zero.GetBitwidth(), 32);
EXPECT_FALSE(zero.IsSigned());
ConstantValue zero_s = ConstantValue::GetZero(32, true);
EXPECT_EQ(zero_s.GetSignedValue(), 0);
EXPECT_EQ(zero_s.GetUnsignedValue(), 0);
EXPECT_EQ(zero_s.GetBitwidth(), 32);
EXPECT_TRUE(zero_s.IsSigned());
}
TEST_F(ConstantValueTest, OneTest32) {
ConstantValue one = ConstantValue::GetOne(32, false);
EXPECT_EQ(one.GetSignedValue(), 1);
EXPECT_EQ(one.GetUnsignedValue(), 1);
EXPECT_EQ(one.GetBitwidth(), 32);
EXPECT_FALSE(one.IsSigned());
ConstantValue one_s = ConstantValue::GetOne(32, true);
EXPECT_EQ(one_s.GetSignedValue(), 1);
EXPECT_EQ(one_s.GetUnsignedValue(), 1);
EXPECT_EQ(one_s.GetBitwidth(), 32);
EXPECT_TRUE(one_s.IsSigned());
}
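// Values outside the signed range of the requested bitwidth wrap around in
// two's complement, so 4194304 stored in 23 bits reads back as -4194304.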
TEST_F(ConstantValueTest, Signed23) {
ConstantValue signed_number = ConstantValue::GetSigned(4194303, 23);
EXPECT_EQ(signed_number.GetSignedValue(), 4194303);
EXPECT_EQ(signed_number.GetBitwidth(), 23);
EXPECT_TRUE(signed_number.IsSigned());
ConstantValue signed_number_of = ConstantValue::GetSigned(4194304, 23);
EXPECT_EQ(signed_number_of.GetSignedValue(), -4194304);
EXPECT_EQ(signed_number_of.GetBitwidth(), 23);
EXPECT_TRUE(signed_number_of.IsSigned());
}
TEST_F(ConstantValueTest, Unsigned23) {
ConstantValue unsigned_number = ConstantValue::GetUnsigned(8388607, 23);
EXPECT_EQ(unsigned_number.GetUnsignedValue(), 8388607);
EXPECT_EQ(unsigned_number.GetBitwidth(), 23);
EXPECT_FALSE(unsigned_number.IsSigned());
ConstantValue unsigned_number_of = ConstantValue::GetUnsigned(8388608, 23);
EXPECT_EQ(unsigned_number_of.GetUnsignedValue(), 0);
EXPECT_EQ(unsigned_number_of.GetBitwidth(), 23);
EXPECT_FALSE(unsigned_number_of.IsSigned());
}
TEST_F(ConstantValueTest, FromLiteral) {
auto cv_8 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int8_t>(-32)));
EXPECT_TRUE(cv_8.ok());
EXPECT_TRUE(cv_8->IsSigned());
EXPECT_EQ(cv_8->GetBitwidth(), 8);
EXPECT_EQ(cv_8->GetSignedValue(), -32);
auto cv_u8 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int8_t>(32)));
EXPECT_TRUE(cv_u8.ok());
EXPECT_TRUE(cv_u8->IsSigned());
EXPECT_EQ(cv_u8->GetBitwidth(), 8);
EXPECT_EQ(cv_u8->GetUnsignedValue(), 32);
auto cv_16 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int16_t>(32000)));
EXPECT_TRUE(cv_16.ok());
EXPECT_TRUE(cv_16->IsSigned());
EXPECT_EQ(cv_16->GetBitwidth(), 16);
EXPECT_EQ(cv_16->GetSignedValue(), 32000);
auto cv_u16 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<uint16_t>(33000)));
EXPECT_TRUE(cv_u16.ok());
EXPECT_FALSE(cv_u16->IsSigned());
EXPECT_EQ(cv_u16->GetBitwidth(), 16);
EXPECT_EQ(cv_u16->GetUnsignedValue(), 33000);
auto cv_32 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int32_t>(-2000000000)));
EXPECT_TRUE(cv_32.ok());
EXPECT_TRUE(cv_32->IsSigned());
EXPECT_EQ(cv_32->GetBitwidth(), 32);
EXPECT_EQ(cv_32->GetSignedValue(), -2000000000);
auto cv_u32 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<uint32_t>(3000000000)));
EXPECT_TRUE(cv_u32.ok());
EXPECT_FALSE(cv_u32->IsSigned());
EXPECT_EQ(cv_u32->GetBitwidth(), 32);
EXPECT_EQ(cv_u32->GetUnsignedValue(), 3000000000);
auto cv_64 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int64_t>(3000000000)));
EXPECT_TRUE(cv_64.ok());
EXPECT_TRUE(cv_64->IsSigned());
EXPECT_EQ(cv_64->GetBitwidth(), 64);
EXPECT_EQ(cv_64->GetSignedValue(), 3000000000);
auto cv_u64 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<uint64_t>(6000000000)));
EXPECT_TRUE(cv_u64.ok());
EXPECT_FALSE(cv_u64->IsSigned());
EXPECT_EQ(cv_u64->GetBitwidth(), 64);
EXPECT_EQ(cv_u64->GetUnsignedValue(), 6000000000);
}
TEST_F(ConstantValueTest, Add) {
ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(1, 23);
ConstantValue result = lhs.add(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(8388600, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.add(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388607);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(4, 23);
result = lhs.add(rhs);
EXPECT_EQ(result.GetSignedValue(), -6);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(-1, 23);
result = lhs.add(rhs);
EXPECT_EQ(result.GetSignedValue(), 4194303);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Sub) {
ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(1, 23);
ConstantValue result = lhs.sub(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388606);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(6, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.sub(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388607);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(4, 23);
result = lhs.sub(rhs);
EXPECT_EQ(result.GetSignedValue(), -14);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(1, 23);
result = lhs.sub(rhs);
EXPECT_EQ(result.GetSignedValue(), 4194303);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Div) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
ConstantValue result = lhs.div(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 2);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(6, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.div(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(4, 23);
result = lhs.div(rhs);
EXPECT_EQ(result.GetSignedValue(), -2);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
result = lhs.div(rhs);
EXPECT_EQ(result.GetSignedValue(), -2097152);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Mod) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
ConstantValue result = lhs.mod(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(6, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.mod(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 6);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(3, 23);
result = lhs.mod(rhs);
EXPECT_EQ(result.GetSignedValue(), -1);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(1, 23);
result = lhs.mod(rhs);
EXPECT_EQ(result.GetSignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Mul) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
ConstantValue result = lhs.mul(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 4418);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(8388607, 23);
rhs = ConstantValue::GetUnsigned(2, 23);
result = lhs.mul(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388606);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(3, 23);
result = lhs.mul(rhs);
EXPECT_EQ(result.GetSignedValue(), -30);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
result = lhs.mod(rhs);
EXPECT_EQ(result.GetSignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, LtGtEq) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
EXPECT_FALSE(lhs.lt(rhs));
EXPECT_TRUE(lhs.gt(rhs));
lhs = ConstantValue::GetUnsigned(8388607, 23);
rhs = ConstantValue::GetUnsigned(2, 23);
EXPECT_FALSE(lhs.lt(rhs));
EXPECT_TRUE(lhs.gt(rhs));
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(3, 23);
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
EXPECT_TRUE(lhs.lt(rhs));
EXPECT_FALSE(lhs.gt(rhs));
lhs = ConstantValue::GetUnsigned(43, 23);
rhs = ConstantValue::GetUnsigned(43, 23);
EXPECT_TRUE(lhs.eq(rhs));
EXPECT_TRUE(rhs.eq(lhs));
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(-10, 23);
EXPECT_TRUE(lhs.eq(rhs));
EXPECT_TRUE(rhs.eq(lhs));
lhs = ConstantValue::GetUnsigned(4194304, 23);
rhs = ConstantValue::GetUnsigned(2, 23);
EXPECT_FALSE(lhs.eq(rhs));
EXPECT_FALSE(rhs.eq(lhs));
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
EXPECT_FALSE(lhs.eq(rhs));
EXPECT_FALSE(rhs.eq(lhs));
}
}
} |
1,853 | cpp | tensorflow/tensorflow | all_gather_decomposer | third_party/xla/xla/service/all_gather_decomposer.cc | third_party/xla/xla/service/all_gather_decomposer_test.cc | #ifndef XLA_SERVICE_ALL_GATHER_DECOMPOSER_H_
#define XLA_SERVICE_ALL_GATHER_DECOMPOSER_H_
#include <cstdint>
#include <functional>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
namespace xla {
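// Pass that rewrites all-gather ops selected by `should_decompose` into a
// dynamic-update-slice of the operand into a zero buffer followed by an
// all-reduce that combines the per-participant contributions.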
class AllGatherDecomposer : public HloModulePass {
public:
explicit AllGatherDecomposer(
std::function<bool(const HloAllGatherInstruction&)> should_decompose)
: should_decompose_(std::move(should_decompose)) {}
AllGatherDecomposer()
: should_decompose_(
[](const HloAllGatherInstruction& ag) { return true; }) {}
absl::string_view name() const override { return "all_gather_decomposer"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
protected:
virtual HloInstruction* TranslateAllGatherToAllReducePerOperand(
CollectiveOpGroupMode group_mode, const HloAllGatherInstruction& ag,
const Shape& output_shape, HloInstruction* operand, HloComputation* comp,
int64_t ag_dim);
virtual bool ShouldDecompose(const HloAllGatherInstruction& ag) const {
return should_decompose_(ag);
}
absl::Status DecomposeAllGather(HloAllGatherInstruction* ag,
HloComputation* comp);
private:
std::function<bool(const HloAllGatherInstruction&)> should_decompose_;
};
}
#endif
#include "xla/service/all_gather_decomposer.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
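// Builds the reduction computation for the emitted all-reduce: scalar
// addition, or logical OR when the element type is PRED.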
HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) {
HloComputation::Builder sum_b("add");
auto x = sum_b.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = sum_b.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
if (type == PRED) {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kOr, x, y));
} else {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kAdd, x, y));
}
HloComputation* reduction = module->AddEmbeddedComputation(sum_b.Build());
return reduction;
}
}
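// Lowers one all-gather operand: broadcast a zero buffer of the full output
// shape, dynamic-update-slice the local shard at this participant's offset,
// and all-reduce so every participant receives the gathered result.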
HloInstruction* AllGatherDecomposer::TranslateAllGatherToAllReducePerOperand(
CollectiveOpGroupMode group_mode, const HloAllGatherInstruction& ag,
const Shape& output_shape, HloInstruction* operand, HloComputation* comp,
int64_t ag_dim) {
std::vector<HloInstruction*> start_indices =
CreateStartIndicesForCollectiveDecomposition(
group_mode, ag.replica_groups(), operand->shape(), ag_dim, comp)
.value();
auto zero = comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(output_shape.element_type())));
zero = comp->AddInstruction(
HloInstruction::CreateBroadcast(output_shape, zero, {}));
auto dus = comp->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
zero->shape(), zero, operand, start_indices));
auto ar = comp->AddInstruction(HloInstruction::CreateAllReduce(
dus->shape(), {dus},
MakeBinaryAdd(dus->shape().element_type(), comp->parent()),
ag.device_list(),
ag.constrain_layout(), ag.channel_id(),
ag.use_global_device_ids()));
return ar;
}
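// Replaces an all-gather (single-operand or variadic) with the equivalent
// dynamic-update-slice + all-reduce pattern, re-tupling the results for
// variadic all-gathers, then removes the original instruction.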
absl::Status AllGatherDecomposer::DecomposeAllGather(
HloAllGatherInstruction* ag, HloComputation* comp) {
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ag->channel_id().has_value(),
ag->use_global_device_ids()));
if (ag->operand_count() > 1) {
std::vector<HloInstruction*> tuple_inputs;
for (int i = 0; i < ag->operand_count(); ++i) {
auto* input_operand = ag->mutable_operand(i);
const auto& output_shape = ag->shape().tuple_shapes(i);
auto* ar = TranslateAllGatherToAllReducePerOperand(
group_mode, *ag, output_shape, input_operand, comp,
ag->all_gather_dimension());
tuple_inputs.push_back(ar);
}
auto tup = comp->AddInstruction(HloInstruction::CreateTuple(tuple_inputs));
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(tup));
} else {
auto* ar = TranslateAllGatherToAllReducePerOperand(
group_mode, *ag, ag->shape(), ag->mutable_operand(0), comp,
ag->all_gather_dimension());
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(ar));
}
TF_RETURN_IF_ERROR(comp->RemoveInstructionAndUnusedOperands(ag));
return absl::OkStatus();
}
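// Decomposes every all-gather accepted by ShouldDecompose in all non-fusion
// computations of the module.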
absl::StatusOr<bool> AllGatherDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto hlo : comp->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kAllGather) {
continue;
}
auto ag = Cast<HloAllGatherInstruction>(hlo);
if (ShouldDecompose(*ag)) {
TF_RETURN_IF_ERROR(DecomposeAllGather(ag, comp));
changed = true;
}
}
}
return changed;
}
} | #include "xla/service/all_gather_decomposer.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllGatherDecomposerTest = HloTestBase;
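// Each test below checks that an all-gather is rewritten into
// all-reduce(dynamic-update-slice(broadcast(0), operand, offsets)) with the
// offset derived from replica-id, partition-id, or the flattened global id,
// depending on the collective group mode.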
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGather) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAndPartitionAllGather) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0}}, channel_id=1,
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::PartitionId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTrivialGroup) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0,1,2,3}},
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroups) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0),
replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto id =
AllOf(op::Shape("u32[]"),
op::Reshape(op::DynamicSlice(op::Constant(), op::ReplicaId())));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0),
op::Constant(), op::Multiply(id, op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroupsGlobalIds) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0),
replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}, channel_id=1,
use_global_device_ids=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto global_id =
op::Add(op::Multiply(op::ReplicaId(), op::Constant()), op::PartitionId());
auto id = AllOf(op::Shape("u32[]"),
op::Reshape(op::DynamicSlice(op::Constant(), global_id)));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0),
op::Constant(), op::Multiply(id, op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTuple) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
param1 = f32[10,16] parameter(1)
ROOT ag = (f32[10,80], f32[10,64]) all-gather(param0, param1),
replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(1), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant())))));
}
}
} |
1,854 | cpp | tensorflow/tensorflow | all_reduce_reassociate | third_party/xla/xla/service/all_reduce_reassociate.cc | third_party/xla/xla/service/all_reduce_reassociate_test.cc | #ifndef XLA_SERVICE_ALL_REDUCE_REASSOCIATE_H_
#define XLA_SERVICE_ALL_REDUCE_REASSOCIATE_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
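// Pass that rewrites patterns such as add(all-reduce(x), all-reduce(y)) into
// all-reduce(add(x, y)), so two collectives become one. When
// `reassociate_converted_ar` is set, operands that are type-converted before
// being combined are also handled by promoting the all-reduce.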
class AllReduceReassociate : public HloModulePass {
public:
explicit AllReduceReassociate(bool reassociate_converted_ar = false)
: reassociate_converted_ar_(reassociate_converted_ar) {}
absl::string_view name() const override { return "all-reduce-reassociate"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
bool reassociate_converted_ar_;
};
}
#endif
#include "xla/service/all_reduce_reassociate.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
namespace m = match;
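// Compares two AllReduceKeys, optionally ignoring the element-type component
// so that all-reduces destined for promotion to a common type still match.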
bool AreAllreduceKeysEqual(AllReduceKey& key0, AllReduceKey& key1,
bool ignore_element_type) {
if (ignore_element_type) {
return std::get<0>(key0) == std::get<0>(key1) &&
std::get<2>(key0) == std::get<2>(key1) &&
std::get<3>(key0) == std::get<3>(key1) &&
std::get<4>(key0) == std::get<4>(key1) &&
std::get<5>(key0) == std::get<5>(key1);
} else {
return key0 == key1;
}
}
bool AreCompatible(const HloAllReduceInstruction* ar0,
const HloAllReduceInstruction* ar1, ReductionKind op_kind,
bool ignore_element_type) {
std::optional<AllReduceKey> key0 = GetAllReduceKey(ar0);
std::optional<AllReduceKey> key1 = GetAllReduceKey(ar1);
auto kind0 = MatchReductionComputation(ar0->to_apply());
return key0 && key1 && kind0 &&
AreAllreduceKeysEqual(*key0, *key1, ignore_element_type) &&
kind0 == op_kind;
}
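// Follows a chain of single-user reshape/pad/slice/convert ops back to the
// defining all-reduce; pads must use the reduction identity as the padding
// value. A dynamic-slice fed directly by an all-reduce is returned as-is to
// support the reduce-scatter-like pattern. Returns nullptr on mismatch.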
HloInstruction* LookThroughForAllReduce(HloInstruction* instr,
const Literal& reduction_identity) {
if (instr->opcode() == HloOpcode::kDynamicSlice) {
if (instr->operand(0)->opcode() != HloOpcode::kAllReduce ||
instr->operand(0)->user_count() != 1 || instr->user_count() != 1) {
return nullptr;
}
return instr;
}
while (instr->opcode() != HloOpcode::kAllReduce) {
if (instr->user_count() != 1) {
return nullptr;
}
if (instr->opcode() != HloOpcode::kReshape &&
instr->opcode() != HloOpcode::kPad &&
instr->opcode() != HloOpcode::kSlice &&
instr->opcode() != HloOpcode::kConvert) {
return nullptr;
}
if (instr->opcode() == HloOpcode::kPad) {
if (!instr->operand(1)->IsConstant()) {
return nullptr;
}
if (instr->operand(1)->literal() != reduction_identity) {
return nullptr;
}
}
instr = instr->mutable_operand(0);
}
if (instr->user_count() != 1) {
return nullptr;
}
return instr;
}
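// The rewrite is profitable only if the combined all-reduce moves no more
// elements than the two original all-reduces together.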
bool ReassociateAllReduceIsProfitable(HloInstruction* ar0, HloInstruction* ar1,
HloInstruction* reassociated_inst) {
int64_t pre_reassociated_size = ShapeUtil::ElementsIn(ar0->shape());
if (ar0 != ar1) {
pre_reassociated_size += ShapeUtil::ElementsIn(ar1->shape());
}
return pre_reassociated_size >=
ShapeUtil::ElementsIn(reassociated_inst->shape());
}
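// Any convert feeding the combining op must be a value-preserving cast, and
// when both operands are converted they must start from the same type.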
bool AreCompatibleConverts(const HloInstruction* convert0,
const HloInstruction* convert1) {
bool is_compatible = true;
if (convert0) {
is_compatible &= primitive_util::CastPreservesValues(
convert0->operand(0)->shape().element_type(),
convert0->shape().element_type());
}
if (convert1) {
is_compatible &= primitive_util::CastPreservesValues(
convert1->operand(0)->shape().element_type(),
convert1->shape().element_type());
}
if (convert0 && convert1) {
CHECK(convert0->shape().element_type() == convert1->shape().element_type());
is_compatible &= convert0->operand(0)->shape().element_type() ==
convert1->operand(0)->shape().element_type();
}
return is_compatible;
}
template <typename Pattern>
auto OptionalConvertWithOneUser(HloInstruction** optional_convert,
Pattern pattern) {
return m::AnyOf<HloInstruction>(
m::Convert(optional_convert, pattern).WithOneUser(), std::move(pattern));
}
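// Matches an array-shaped op whose two operands are all-reduces, each
// optionally behind a single-user convert; captures any converts found.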
bool MatchOperandsToAllReduceWithOptionalConvert(HloInstruction* inst,
HloInstruction** convert0,
HloInstruction** convert1) {
auto ar_op_optional_convert_pattern =
m::Op()
.WithOperand(0, OptionalConvertWithOneUser(convert0, m::AllReduce()))
.WithOperand(1, OptionalConvertWithOneUser(convert1, m::AllReduce()))
.WithPredicate([](const HloInstruction* inst) {
return inst->shape().IsArray();
});
return Match(inst, ar_op_optional_convert_pattern);
}
}
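// For each reduction-like instruction whose operands trace back to compatible
// all-reduces, moves the combination below a single all-reduce, handling
// converted operands and the dynamic-slice (reduce-scatter-like) pattern, and
// assigning fresh channel ids where needed.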
absl::StatusOr<bool> AllReduceReassociate::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceReassociate because the module contains all-reduce "
"with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
std::optional<ReductionKind> kind = MatchReductionInstruction(inst);
if (!kind) {
continue;
}
std::optional<Literal> reduction_identity =
GetReductionIdentity(*kind, inst->shape().element_type());
if (!reduction_identity) {
continue;
}
HloInstruction* lhs = LookThroughForAllReduce(inst->mutable_operand(0),
*reduction_identity);
if (lhs == nullptr) {
continue;
}
HloInstruction* rhs = LookThroughForAllReduce(inst->mutable_operand(1),
*reduction_identity);
if (rhs == nullptr) {
continue;
}
if (!inst->shape().IsArray()) {
continue;
}
if (lhs->opcode() != rhs->opcode() ||
(lhs->opcode() == HloOpcode::kDynamicSlice &&
!ShapeUtil::Compatible(lhs->operand(0)->shape(),
rhs->operand(0)->shape()))) {
continue;
}
HloAllReduceInstruction* ar0 = nullptr;
HloAllReduceInstruction* ar1 = nullptr;
bool reduce_scatter_pattern_match = false;
if (lhs->opcode() == HloOpcode::kDynamicSlice) {
HloInstruction* original_rhs_operand = rhs->mutable_operand(0);
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, lhs->mutable_operand(0)));
if (!lhs->Identical(*rhs)) {
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
continue;
}
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
ar0 = Cast<HloAllReduceInstruction>(lhs->mutable_operand(0));
ar1 = Cast<HloAllReduceInstruction>(rhs->mutable_operand(0));
reduce_scatter_pattern_match = true;
} else {
ar0 = Cast<HloAllReduceInstruction>(lhs);
ar1 = Cast<HloAllReduceInstruction>(rhs);
}
if (!ReassociateAllReduceIsProfitable(lhs, rhs, inst)) {
continue;
}
HloInstruction* convert0 = nullptr;
HloInstruction* convert1 = nullptr;
if (!MatchOperandsToAllReduceWithOptionalConvert(inst, &convert0,
&convert1)) {
VLOG(2) << "One or both inputs are type-converted.";
}
bool should_promote_ar = convert0 || convert1;
if (should_promote_ar) {
if (!reassociate_converted_ar_) {
VLOG(2) << "Promotions of all_reduces for reassociation will be "
"disabled.";
continue;
}
if (!AreCompatibleConverts(convert0, convert1)) {
VLOG(2) << "Inputs' Converts are not preserving "
"value, skipping";
continue;
}
}
HloInstruction* op_operand0 = inst->mutable_operand(0);
HloInstruction* op_operand1 = inst->mutable_operand(1);
if (convert0) {
op_operand0 = convert0->mutable_operand(0);
}
if (convert1) {
op_operand1 = convert1->mutable_operand(0);
}
      if (!AreCompatible(ar0, ar1, *kind, should_promote_ar)) {
VLOG(2) << "All-Reduce operations are not compatible, skipping";
continue;
}
VLOG(2) << "Reassociated:";
VLOG(2) << "\tAR0: " << ar0->ToString();
VLOG(2) << "\tAR1: " << ar1->ToString();
auto op_users = inst->users();
HloInstruction* new_op_operand0 = ar0->mutable_operand(0);
HloInstruction* new_op_operand1 = ar1->mutable_operand(0);
if (convert0) {
HloInstruction* ar0_operand = ar0->mutable_operand(0);
TF_RETURN_IF_ERROR(convert0->ReplaceOperandWith(0, ar0_operand));
new_op_operand0 = convert0;
}
if (convert1) {
HloInstruction* ar1_operand = ar1->mutable_operand(0);
TF_RETURN_IF_ERROR(convert1->ReplaceOperandWith(0, ar1_operand));
new_op_operand1 = convert1;
}
HloInstruction* new_op = inst;
if (should_promote_ar) {
new_op = computation->AddInstruction(inst->CloneWithNewOperands(
inst->shape(), {new_op_operand0, new_op_operand1}));
} else if (reduce_scatter_pattern_match) {
new_op = computation->AddInstruction(inst->CloneWithNewOperands(
ar0->shape(), {new_op_operand0, new_op_operand1}));
}
Shape new_ar_out_shape = inst->shape();
CHECK(!should_promote_ar || !reduce_scatter_pattern_match);
if (should_promote_ar) {
new_ar_out_shape.set_element_type(
new_op_operand0->shape().element_type());
} else if (reduce_scatter_pattern_match) {
new_ar_out_shape = ar0->shape();
} else {
TF_RETURN_IF_ERROR(ar0->ReplaceAllUsesWith(ar0->mutable_operand(0)));
TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(ar1->mutable_operand(0)));
}
HloInstruction* new_ar = computation->AddInstruction(
ar0->CloneWithNewOperands(new_ar_out_shape, {new_op}));
if (new_ar->channel_id()) {
new_ar->set_channel_id(next_channel_id++);
}
if (should_promote_ar) {
HloComputation* to_apply = new_ar->to_apply();
PrimitiveType type = new_ar->shape().element_type();
std::string name = absl::StrCat(to_apply->name(), "_reassoc_promoted");
HloComputation::Builder promoted(name);
auto x = promoted.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = promoted.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
promoted.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}),
to_apply->root_instruction()->opcode(), x, y));
HloComputation* to_apply_promoted =
inst->GetModule()->AddEmbeddedComputation(promoted.Build());
new_ar->set_to_apply(to_apply_promoted);
TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_ar));
} else if (reduce_scatter_pattern_match) {
auto dyn_slice_operands = lhs->mutable_operands();
dyn_slice_operands[0] = new_ar;
HloInstruction* new_dyn_slice = inst->parent()->AddInstruction(
lhs->CloneWithNewOperands(inst->shape(), dyn_slice_operands));
TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_dyn_slice));
} else {
TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_ar));
}
if (should_promote_ar || reduce_scatter_pattern_match) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));
}
if (reduce_scatter_pattern_match) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(lhs));
if (lhs != rhs) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rhs));
}
}
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));
if (ar0 != ar1) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));
}
changed = true;
}
}
return changed;
}
} | #include "xla/service/all_reduce_reassociate.h"
#include <cstddef>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
using ::testing::_;
class AllReduceSimplifierTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change,
bool reassociate_converted_ar = false) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
auto changed =
AllReduceReassociate(reassociate_converted_ar).Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t AllReduceCount(std::unique_ptr<HloModule>& module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
}
};
TEST_F(AllReduceSimplifierTest, Simple) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleWithChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), channel_id=1, replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), channel_id=1, replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleChain) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum
ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum
add0 = f32[8] add(ar0, ar1)
add1 = f32[8] add(add0, ar2)
ROOT add2 = f32[8] add(add1, ar3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(
m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),
m::Parameter(3))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleTree) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum
ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum
add0 = f32[8] add(ar0, ar1)
add1 = f32[8] add(ar2, ar3)
ROOT add2 = f32[8] add(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Add(m::Parameter(2), m::Parameter(3)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, MismatchOp0) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchOp1) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=max
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={{0}}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchHasChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, channel_id=3, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchUseGlobalDeviceId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={{0, 1}}, channel_id=3, use_global_device_ids=true, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={{0, 1}}, channel_id=4, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, NotSingleUser) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
add = f32[8] add(ar0, ar1)
ROOT t = (f32[8], f32[8]) tuple(ar0, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, DoubleUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
add = f32[8] add(ar0, ar0)
ROOT c = f32[8] copy(add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
}
TEST_F(AllReduceSimplifierTest, PaddedUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[12]{0} pad(ar0, constant.1), padding=0_4
pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4
ROOT add = f32[12] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Pad(m::Parameter(0), _),
m::Pad(m::Parameter(1), _))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, PaddedUseInvalidReduceValue) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(-1.0)
pad = f32[12]{0} pad(ar0, constant.1), padding=0_4
pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4
ROOT add = f32[12] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 2);
}
TEST_F(AllReduceSimplifierTest, PaddedUseNotProfitable) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[17]{0} pad(ar0, constant.1), padding=0_9
pad.1 = f32[17]{0} pad(ar1, constant.1), padding=0_9
ROOT add = f32[17] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 2);
}
TEST_F(AllReduceSimplifierTest, PaddedUseDoubleUseNotProfitable) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[9]{0} pad(ar0, constant.1), padding=0_1
ROOT add = f32[9] add(pad, pad)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, ReshapeUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
rshp0 = f32[8]{0} reshape(ar0)
rshp1 = f32[8]{0} reshape(ar1)
ROOT add = f32[8] add(rshp0, rshp1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Reshape(m::Parameter(0)),
m::Reshape(m::Parameter(1)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SliceUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
rshp0 = f32[4]{0} slice(ar0), slice={[0:4]}
rshp1 = f32[4]{0} slice(ar1), slice={[0:4]}
ROOT add = f32[4] add(rshp0, rshp1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Slice(m::Parameter(0)),
m::Slice(m::Parameter(1)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, ChainWithConvert) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1
convert0 = f32[8] convert(ar0)
convert1 = f32[8] convert(ar1)
add0 = f32[8] add(convert0, convert1)
convert2 = f32[8] convert(ar2)
add1 = f32[8] add(add0, convert2)
convert3 = f32[8] convert(ar3)
add2 = f32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true,
true));
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Convert(m::AllReduce(m::Add(m::Add(m::Add(m::Convert(m::Parameter(0)),
m::Convert(m::Parameter(1))),
m::Convert(m::Parameter(2))),
m::Convert(m::Parameter(3))))));
EXPECT_EQ(AllReduceCount(module), 1);
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(0)->shape(),
GmockMatch(::xla::match::Shape().WithElementType(F32)));
}
TEST_F(AllReduceSimplifierTest, AllreduceWithConvertIncompatibleType) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
max.1 {
x.48 = bf16[] parameter(0)
y.48 = bf16[] parameter(1)
ROOT max.2533 = bf16[] maximum(x.48, y.48)
}
min.1 {
x.49 = bf16[] parameter(0)
y.49 = bf16[] parameter(1)
ROOT min.2534 = bf16[] minimum(x.49, y.49)
}
mul.1 {
x.50 = bf16[] parameter(0)
y.50 = bf16[] parameter(1)
ROOT mul.2535 = bf16[] multiply(x.50, y.50)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=max.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=min.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=mul.1
convert0 = f32[8] convert(ar0)
convert1 = f32[8] convert(ar1)
add0 = f32[8] add(convert0, convert1)
convert2 = f32[8] convert(ar2)
add1 = f32[8] add(add0, convert2)
convert3 = f32[8] convert(ar3)
add2 = f32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
SCOPED_TRACE(module->ToString());
}
TEST_F(AllReduceSimplifierTest, AllreduceWithLossyConvert) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1
convert0 = u32[8] convert(ar0)
convert1 = u32[8] convert(ar1)
add0 = u32[8] add(convert0, convert1)
convert2 = u32[8] convert(ar2)
add1 = u32[8] add(add0, convert2)
convert3 = u32[8] convert(ar3)
add2 = u32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
SCOPED_TRACE(module->ToString());
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePattern) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = f32[1,8] parameter(2)
p3 = s32[] parameter(3)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[1,8] all-reduce(p2), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}
dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn1)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::DynamicSlice(
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Constant(), m::Parameter(3)));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePatternSameOperand) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = s32[] parameter(2)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar2 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p2), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p2), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn0)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::DynamicSlice(
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(0)),
m::Parameter(1))),
m::Constant(), m::Parameter(2)));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSliceDifferentSlices) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = f32[1,16] parameter(2)
p3 = s32[] parameter(3)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[1,16] all-reduce(p2), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}
dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn1)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Add(m::DynamicSlice(),
m::DynamicSlice(m::AllReduce(), m::Constant(), m::Parameter(3))));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 2);
}
}
} |
1,855 | cpp | tensorflow/tensorflow | sharding_remover | third_party/xla/xla/service/sharding_remover.cc | third_party/xla/xla/service/sharding_remover_test.cc | #ifndef XLA_SERVICE_SHARDING_REMOVER_H_
#define XLA_SERVICE_SHARDING_REMOVER_H_
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class ShardingRemover : public HloModulePass {
public:
absl::string_view name() const override { return "sharding-remover"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/sharding_remover.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/errors.h"
namespace xla {
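// Forwards the operand of Sharding/SPMDShardToFullShape/SPMDFullToShardShape
// custom-calls to all of their users; a "Sharding" call itself is additionally
// replaced by a copy of its operand.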
absl::StatusOr<bool> ShardingRemover::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
const absl::flat_hash_set<absl::string_view> to_remove_sharding_ops = {
"Sharding", "SPMDShardToFullShape", "SPMDFullToShardShape"};
for (HloComputation* computation : module->computations(execution_threads)) {
auto instructions = computation->MakeInstructionPostOrder();
std::reverse(instructions.begin(), instructions.end());
for (HloInstruction* instruction : instructions) {
if (instruction->opcode() != HloOpcode::kCustomCall) {
continue;
}
if (!to_remove_sharding_ops.contains(instruction->custom_call_target())) {
continue;
}
CHECK(instruction->operand_count() == 1)
<< "Sharding instruction must have exactly one operand";
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(
instruction->mutable_operand(0), name()));
changed = true;
if (instruction->custom_call_target() == "Sharding") {
auto copy = computation->AddInstruction(
HloInstruction::CreateUnary(instruction->shape(), HloOpcode::kCopy,
instruction->mutable_operand(0)));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(instruction, copy));
instruction = copy;
}
}
}
return changed;
}
} | #include "xla/service/sharding_remover.h"
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/status_macros.h"
#include "xla/tests/hlo_test_base.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ShardingRemoverTest = HloTestBase;
TEST_F(ShardingRemoverTest, RemoveSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.3379 = f32[1,1]{1,0} parameter(0)
%custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),
custom_call_target="Sharding", sharding={replicated}
ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Parameter()));
auto parameter = root->operand(0);
EXPECT_EQ(parameter->user_count(), 2);
bool replaced = false;
for (HloInstruction* user : parameter->users()) {
if (user->opcode() == HloOpcode::kCopy) {
replaced = true;
EXPECT_THAT(user, op::Copy(op::Parameter()));
break;
}
}
EXPECT_TRUE(replaced);
}
TEST_F(ShardingRemoverTest, RemoveSPMDShardingToFullShape) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.3379 = f32[1,1]{1,0} parameter(0)
%custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),
custom_call_target="SPMDShardToFullShape", sharding={replicated}
ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Parameter()));
}
TEST_F(ShardingRemoverTest, RemoveSPMDFullToShardShape) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.3379 = f32[1,1]{1,0} parameter(0)
%custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),
custom_call_target="SPMDFullToShardShape", sharding={replicated}
ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Parameter()));
}
TEST_F(ShardingRemoverTest, NoChangeForOtherCustomCall) {
const char* const hlo_string = R"(
HloModule cluster_2013453984438090939__.47
ENTRY %cluster_2013453984438090939__.47
(arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%custom-call = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
custom-call(bf16[2,209664]{1,0} %arg_tuple.1), custom_call_target="TopK"
%get-tuple-element = bf16[2,2000]{1,0}
get-tuple-element((bf16[2,2000]{1,0}, s32[2,2000]{1,0}) %custom-call),
index=0
%get-tuple-element.1 = s32[2,2000]{1,0} get-tuple-element((bf16[2,2000]{1,0},
s32[2,2000]{1,0}) %custom-call), index=1, sharding={replicated}
ROOT %tuple.46 = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
tuple(bf16[2,2000]{1,0} %get-tuple-element, s32[2,2000]{1,0}
%get-tuple-element.1),
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_FALSE(changed);
}
}
} |
1,856 | cpp | tensorflow/tensorflow | triangular_solve_expander | third_party/xla/xla/service/triangular_solve_expander.cc | third_party/xla/xla/service/triangular_solve_expander_test.cc | #ifndef XLA_SERVICE_TRIANGULAR_SOLVE_EXPANDER_H_
#define XLA_SERVICE_TRIANGULAR_SOLVE_EXPANDER_H_
#include "absl/container/flat_hash_map.h"
#include "xla/client/xla_builder.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
class TriangularSolveExpander : public OpExpanderPass {
public:
explicit TriangularSolveExpander(int64_t block_size = 128);
absl::string_view name() const override {
return "triangular_solve_expander";
}
protected:
virtual bool UseDirectSolves() const { return true; }
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
XlaOp SolveByInvertingDiagonalBlocks(XlaOp a, XlaOp b, bool left_side,
bool lower, bool transpose_a,
bool conjugate_a, bool unit_diagonal,
PrecisionConfig::Precision precision);
virtual XlaOp InvertDiagonalBlocks(XlaOp diag_blocks, bool lower_triangular,
PrecisionConfig::Precision precision);
XlaOp SolveDirectly(XlaOp a, XlaOp b, bool left_side, bool lower,
bool transpose_a, bool conjugate_a, bool unit_diagonal,
PrecisionConfig::Precision precision);
XlaOp BuildTriangularSolve(XlaOp a, XlaOp b, bool left_side, bool lower,
bool transpose_a, bool conjugate_a,
bool unit_diagonal, int64_t block_size,
PrecisionConfig::Precision precision);
private:
const int64_t block_size_;
absl::flat_hash_map<std::string, HloComputation*> computation_cache_;
};
}
#endif
#include "xla/service/triangular_solve_expander.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/client/lib/constants.h"
#include "xla/client/lib/math.h"
#include "xla/client/lib/matrix.h"
#include "xla/client/lib/slicing.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
namespace xla {
namespace {
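// Returns the block_size x block_size blocks on the main diagonal of the
// (batched) matrix `a`, stacked along a new dimension. A trailing partial
// block is padded out with the identity matrix.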
XlaOp DiagonalBlocks(XlaOp a, int64_t block_size) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(a));
int ndims = shape.rank();
int64_t n = ShapeUtil::GetDimension(shape, -1);
int64_t num_blocks = n / block_size;
absl::Span<int64_t const> batch_dims = absl::MakeConstSpan(
shape.dimensions().begin(), shape.dimensions().begin() + (ndims - 2));
XlaOp diag_blocks;
if (n == block_size) {
std::vector<int64_t> permutation(ndims);
std::iota(permutation.begin(), permutation.end(), 1);
permutation.insert(permutation.end() - 2, 0);
return Transpose(Broadcast(a, {1}), permutation);
}
if (n > block_size) {
auto start_indices =
Transpose(Broadcast(Mul(Iota(builder, S32, num_blocks),
ConstantR0<int32_t>(builder, block_size)),
{2}),
{1, 0});
std::vector<int64_t> slice_sizes(ndims);
GatherDimensionNumbers dim_numbers;
for (int i = 0; i < ndims - 2; ++i) {
dim_numbers.add_offset_dims(i);
slice_sizes[i] = ShapeUtil::GetDimension(shape, i);
}
slice_sizes[ndims - 2] = slice_sizes[ndims - 1] = block_size;
dim_numbers.add_offset_dims(ndims - 1);
dim_numbers.add_offset_dims(ndims);
dim_numbers.add_start_index_map(ndims - 2);
dim_numbers.add_start_index_map(ndims - 1);
dim_numbers.set_index_vector_dim(1);
diag_blocks = Gather(a, start_indices, dim_numbers, slice_sizes);
}
if (n % block_size != 0) {
auto last_blocks =
SliceInMinorDims(a, {n - n % block_size, n - n % block_size}, {n, n});
PaddingConfig config = MakeNoPaddingConfig(ndims);
int64_t padding = block_size - n % block_size;
config.mutable_dimensions(ndims - 2)->set_edge_padding_high(padding);
last_blocks =
Pad(last_blocks, Zero(builder, shape.element_type()), config);
auto eye =
IdentityMatrix(builder, shape.element_type(), padding, padding);
config = MakeNoPaddingConfig(2);
config.mutable_dimensions(0)->set_edge_padding_low(n % block_size);
eye = Pad(eye, Zero(builder, shape.element_type()), config);
eye = Broadcast(eye, batch_dims);
last_blocks = ConcatInDim(builder, {last_blocks, eye}, ndims - 1);
TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(last_blocks));
auto shape_dims = blocks_shape.dimensions();
auto last_blocks_dims = std::vector<int64_t>(ndims);
std::copy(shape_dims.begin(), shape_dims.end(), last_blocks_dims.begin());
last_blocks_dims.insert(last_blocks_dims.end() - 2, 1);
last_blocks = Reshape(last_blocks, last_blocks_dims);
if (n > block_size) {
diag_blocks =
ConcatInDim(builder, {diag_blocks, last_blocks}, ndims - 2);
} else {
diag_blocks = last_blocks;
}
}
return diag_blocks;
});
}
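// Solves the triangular system one block row (or column) at a time using the
// pre-inverted diagonal blocks, updating the remaining right-hand side with
// batched matrix multiplies.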
XlaOp SolveWithInvertedDiagonalBlocks(XlaOp a, XlaOp b, XlaOp inv_diag_blocks,
bool left_side, bool lower,
bool transpose_a, bool conjugate_a,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(inv_diag_blocks));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
int64_t block_size = ShapeUtil::GetDimension(blocks_shape, -1);
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
int64_t ndims = a_shape.rank();
int64_t n = ShapeUtil::GetDimension(a_shape, -1);
int64_t num_blocks = n / block_size + (n % block_size != 0);
int64_t m_dim = (left_side) ? -1 : -2;
int64_t m = ShapeUtil::GetDimension(b_shape, m_dim);
std::vector<XlaOp> update_ops;
int bdims = b_shape.rank();
int64_t block_dim = (left_side) ? bdims - 2 : bdims - 1;
XlaOp x;
for (int i = 0; i < num_blocks; i++) {
bool backward = left_side ^ lower ^ transpose_a;
auto j = backward ? num_blocks - 1 - i : i;
int64_t block = (n % block_size != 0 && j + 1 == num_blocks)
? n % block_size
: block_size;
auto inv_block =
MaybeConjugate(Collapse(SliceInMinorDims(inv_diag_blocks, {j, 0, 0},
{j + 1, block, block}),
{ndims - 2, ndims - 1}),
conjugate_a);
int64_t k = std::min((j + 1) * block_size, n);
std::vector<int64_t> start = {j * block_size, 0};
std::vector<int64_t> end = {k, m};
if (!left_side) {
std::swap(start[0], start[1]);
std::swap(end[0], end[1]);
}
auto b_row = SliceInMinorDims(b, start, end);
XlaOp remainder;
if (i == 0) {
remainder = b_row;
} else {
if (backward) {
start = {j * block_size,
std::max(int64_t{0}, (num_blocks - i) * block_size)};
end = {k, n};
} else {
start = {j * block_size, 0};
end = {k, std::min(i * block_size, n)};
}
if (!left_side ^ transpose_a) {
std::swap(start[0], start[1]);
std::swap(end[0], end[1]);
}
auto a_row =
MaybeConjugate(SliceInMinorDims(a, start, end), conjugate_a);
if (left_side) {
remainder = b_row - BatchDot(a_row, transpose_a, x, false, precision);
} else {
remainder = b_row - BatchDot(x, false, a_row, transpose_a, precision);
}
}
XlaOp x_update;
if (left_side) {
x_update =
BatchDot(inv_block, transpose_a, remainder, false, precision);
} else {
x_update =
BatchDot(remainder, false, inv_block, transpose_a, precision);
}
if (i == 0) {
x = x_update;
} else {
if (backward) {
x = ConcatInDim(builder, {x_update, x}, block_dim);
} else {
x = ConcatInDim(builder, {x, x_update}, block_dim);
}
}
}
return x;
});
}
}
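// Inverts each triangular diagonal block with an XLA while loop that computes
// one row of the inverse per iteration; blocks are scaled to a unit diagonal
// beforehand and rescaled afterwards.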
XlaOp TriangularSolveExpander::InvertDiagonalBlocks(
XlaOp diag_blocks, bool lower_triangular,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = diag_blocks.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(diag_blocks));
int64_t block_size = ShapeUtil::GetDimension(shape, -1);
int64_t num_blocks = ShapeUtil::ElementsIn(shape) / IPow(block_size, 2);
diag_blocks = Reshape(diag_blocks, {num_blocks, block_size, block_size});
diag_blocks = Triangle(diag_blocks, lower_triangular);
auto diags = GetMatrixDiagonal(diag_blocks);
auto ones = FullLike(diags, 1);
diags = Select(Eq(diags, Zero(builder, shape.element_type())), ones, diags);
auto scaled_diag_blocks = Div(diag_blocks, diags, {0, 2});
auto identity =
IdentityMatrix(builder, shape.element_type(), block_size, block_size);
auto neg_identity = -identity;
auto pos_one = Reshape(One(builder, shape.element_type()), {1, 1});
auto start_index =
ConstantR0<int>(builder, lower_triangular ? 0 : block_size - 1);
auto output_block =
DynamicUpdateSlice(neg_identity, pos_one,
{start_index, start_index});
    XlaOp output = Broadcast(output_block, {num_blocks});
std::vector<Shape> tuple_shapes = {
ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(shape.element_type(),
{num_blocks, block_size, block_size}),
ShapeUtil::MakeShape(shape.element_type(),
{num_blocks, block_size, block_size})};
Shape tuple_shape = ShapeUtil::MakeTupleShape(tuple_shapes);
auto init_i = One(builder, S32);
auto init = Tuple(builder, {init_i, output, scaled_diag_blocks});
std::unique_ptr<XlaBuilder> condb =
builder->CreateSubBuilder("InvertDiagCond");
{
auto i = GetTupleElement(
Parameter(condb.get(), 0, tuple_shape, "InvertDiagCondTuple"), 0);
Lt(i, ConstantR0<int32_t>(condb.get(), block_size));
}
TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
std::unique_ptr<XlaBuilder> bodyb =
builder->CreateSubBuilder("InvertDiagBody");
{
auto input_tuple =
Parameter(bodyb.get(), 0, tuple_shape, "InvertDiagBodyTuple");
auto i = GetTupleElement(input_tuple, 0);
auto body_out = GetTupleElement(input_tuple, 1);
auto body_input = GetTupleElement(input_tuple, 2);
auto zero = ConstantR0<int32_t>(bodyb.get(), 0);
auto j = lower_triangular ? i : ScalarLike(i, block_size - 1) - i;
auto input_row =
DynamicSlice(body_input, {zero, j, zero},
{num_blocks, 1, block_size});
DotDimensionNumbers dnums;
dnums.add_lhs_batch_dimensions(0);
dnums.add_rhs_batch_dimensions(0);
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(1);
PrecisionConfig precision_proto;
precision_proto.add_operand_precision(precision);
precision_proto.add_operand_precision(precision);
auto update = -DotGeneral(input_row, body_out, dnums, &precision_proto);
body_out = DynamicUpdateSlice(body_out, update, {zero, j, zero});
auto next_i = i + ScalarLike(i, 1);
Tuple(bodyb.get(), {next_i, body_out, body_input});
}
TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
auto invert_while = While(cond, body, init);
auto inv_diag_blocks = GetTupleElement(invert_while, 1);
    inv_diag_blocks = Div(inv_diag_blocks, diags, {0, 1});
return Reshape(inv_diag_blocks, shape.dimensions());
});
}
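// Blocked triangular solve: masks `a` to the requested triangle, inverts its
// diagonal blocks once, and then substitutes block by block.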
XlaOp TriangularSolveExpander::SolveByInvertingDiagonalBlocks(
XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
bool conjugate_a, bool unit_diagonal,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t ndims = a_shape.rank();
int64_t k = ShapeUtil::GetDimension(a_shape, -1);
if (unit_diagonal) {
a = lower ? Select(TriangleMask(a, -1), a, ZerosLike(a))
: Select(TriangleMask(a, 0), ZerosLike(a), a);
a = xla::Add(a, IdentityMatrix(builder, a_shape.element_type(), k, k),
{ndims - 2, ndims - 1});
} else {
a = Triangle(a, lower);
}
int64_t block_size = std::min(block_size_, k);
auto diag_blocks = DiagonalBlocks(a, block_size);
auto inv_diag_blocks = InvertDiagonalBlocks(diag_blocks, lower, precision);
return SolveWithInvertedDiagonalBlocks(a, b, inv_diag_blocks, left_side,
lower, transpose_a, conjugate_a,
precision);
});
}
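// Unblocked solve that eliminates one row or column of `a` per iteration;
// preferred for small matrices with large batch counts.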
XlaOp TriangularSolveExpander::SolveDirectly(
XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
bool conjugate_a, bool unit_diagonal,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
int64_t m = ShapeUtil::GetDimension(b_shape, -2);
int64_t n = ShapeUtil::GetDimension(b_shape, -1);
const int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);
a = MaybeConjugate(a, conjugate_a);
bool backwards = transpose_a ^ lower ^ !left_side;
for (int64_t i = 0; i < a_size; ++i) {
int64_t j = backwards ? i : (a_size - i - 1);
std::vector<int64_t> b_row_start, b_row_end;
if (left_side) {
b_row_start = {j, 0};
b_row_end = {j + 1, n};
} else {
b_row_start = {0, j};
b_row_end = {m, j + 1};
}
auto b_row = SliceInMinorDims(b, b_row_start, b_row_end);
std::vector<int64_t> a_start = {j, backwards ? 0 : (j + 1)};
std::vector<int64_t> a_end = {j + 1, backwards ? j : a_size};
if (transpose_a ^ !left_side) {
std::swap(a_start[0], a_start[1]);
std::swap(a_end[0], a_end[1]);
}
auto a_chunk = SliceInMinorDims(a, a_start, a_end);
if (left_side) {
bool which = transpose_a ^ lower;
auto b_chunk =
SliceInMinorDims(b, {which ? 0 : (j + 1), 0}, {which ? j : m, n});
b_row = b_row - BatchDot(a_chunk, transpose_a, b_chunk,
false, precision);
} else {
bool which = transpose_a ^ !lower;
auto b_chunk =
SliceInMinorDims(b, {0, which ? 0 : (j + 1)}, {m, which ? j : n});
b_row = b_row - BatchDot(b_chunk, false, a_chunk,
transpose_a, precision);
}
if (!unit_diagonal) {
auto a_diag = SliceInMinorDims(a, {j, j}, {j + 1, j + 1});
b_row = b_row / a_diag;
}
b = UpdateSliceInMinorDims(b, b_row, b_row_start);
}
return b;
});
}
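// Validates the argument shapes, handles trivial cases, and dispatches to
// either the direct or the block-inversion solver.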
XlaOp TriangularSolveExpander::BuildTriangularSolve(
XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
bool conjugate_a, bool unit_diagonal, int64_t block_size,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
if (a_shape.rank() != b_shape.rank()) {
return InvalidArgument(
"Arguments to TriangularSolve have shapes with different ranks: "
"%s vs. %s",
ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
}
const int64_t ndims = a_shape.rank();
if (ndims < 2) {
return InvalidArgument(
"Arguments to TriangularSolve was rank %d but must have rank >= 2.",
ndims);
}
std::vector<int64_t> batch_dimensions;
int64_t batch = 1;
for (int i = 0; i < ndims - 2; ++i) {
int64_t a_size = a_shape.dimensions(i);
int64_t b_size = b_shape.dimensions(i);
if (a_size != b_size) {
return InvalidArgument(
"Batch dimensions of arguments to TriangularSolve must be equal; "
"shapes were %s and %s.",
ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
}
batch_dimensions.push_back(a_size);
batch *= a_size;
}
if (ShapeUtil::GetDimension(a_shape, -1) !=
ShapeUtil::GetDimension(a_shape, -2)) {
return InvalidArgument(
"The 'a' argument to TriangularSolve must be a batched square matrix;"
" shape was: %s",
ShapeUtil::HumanString(a_shape));
}
const int64_t m = ShapeUtil::GetDimension(b_shape, -2);
const int64_t n = ShapeUtil::GetDimension(b_shape, -1);
if ((left_side ? m : n) != ShapeUtil::GetDimension(a_shape, -1)) {
return InvalidArgument(
"Arguments to TriangularSolve have incompatible matrix shapes %s and "
"%s",
ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
}
int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);
if (ShapeUtil::IsZeroElementArray(b_shape)) {
return b;
}
if (a_size == 1) {
return unit_diagonal ? b : Div(b, MaybeConjugate(a, conjugate_a));
}
if (UseDirectSolves() && batch > block_size_ / 16 &&
a_size < block_size_ / 4) {
return SolveDirectly(a, b, left_side, lower, transpose_a, conjugate_a,
unit_diagonal, precision);
} else {
return SolveByInvertingDiagonalBlocks(a, b, left_side, lower, transpose_a,
conjugate_a, unit_diagonal,
precision);
}
});
}
TriangularSolveExpander::TriangularSolveExpander(int64_t block_size)
: block_size_(block_size) {
CHECK_GE(block_size_, 1);
}
bool TriangularSolveExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kTriangularSolve;
}
absl::StatusOr<HloInstruction*> TriangularSolveExpander::ExpandInstruction(
HloInstruction* instruction) {
const TriangularSolveOptions& options =
instruction->triangular_solve_options();
const std::string name = absl::StrFormat(
"xla.triangular_solve_%s_%s_%s_%s_%s_%s",
instruction->operand(0)->shape().ToString(),
instruction->operand(1)->shape().ToString(),
options.left_side() ? "left" : "right",
options.lower() ? "lower" : "upper",
TriangularSolveOptions_Transpose_Name(options.transpose_a()),
options.unit_diagonal() ? "unit" : "nonunit");
HloModule* module = instruction->GetModule();
HloComputation*& computation =
computation_cache_.emplace(name, nullptr).first->second;
if (!computation) {
XlaBuilder builder(name);
XlaOp a = Parameter(&builder, 0, instruction->operand(0)->shape(), "a");
XlaOp b = Parameter(&builder, 1, instruction->operand(1)->shape(), "b");
bool transpose_a =
options.transpose_a() != TriangularSolveOptions::NO_TRANSPOSE;
bool conjugate_a = options.transpose_a() == TriangularSolveOptions::ADJOINT;
BuildTriangularSolve(a, b, options.left_side(), options.lower(),
transpose_a, conjugate_a, options.unit_diagonal(),
                         block_size_, PrecisionConfig::HIGHEST);
TF_ASSIGN_OR_RETURN(XlaComputation xla_computation, builder.Build());
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
xla_computation.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(
xla_computation.proto(), config));
HloCloneContext context(module);
computation =
module->DeepCloneComputation(new_module->entry_computation(), &context);
}
return instruction->parent()->AddInstruction(HloInstruction::CreateCall(
instruction->shape(), instruction->operands(), computation));
}
} | #include "xla/service/triangular_solve_expander.h"
#include <memory>
#include <utility>
#include "xla/literal.h"
#include "xla/reference_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class TriangularExpanderTest : public HloTestBase,
public ::testing::WithParamInterface<int32_t> {};
TEST_P(TriangularExpanderTest, TestBlockSize) {
auto block_size = GetParam();
std::string hlo_string = R"(
HloModule TensorFlowTriangularSolve
ENTRY main {
a = f32[256,256]{1,0} parameter(0)
b = f32[256,192]{1,0} parameter(1)
ROOT triangular-solve = f32[256,192]{1,0} triangular-solve(a, b),
left_side=true, unit_diagonal=true,
lower=true, transpose_a=NO_TRANSPOSE
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
{
TriangularSolveExpander triangular_solve_expander(block_size);
TF_ASSERT_OK_AND_ASSIGN(
bool result, RunHloPass(&triangular_solve_expander, module.get()));
EXPECT_TRUE(result);
}
Array2D<float> a(256, 256);
for (int64_t row = 0; row < a.dim(0); ++row) {
a(row, row) = 1;
if (row > 0) {
a(row, row - 1) = 0.01;
}
}
Array2D<float> b(256, 192);
const float kMax = static_cast<float>(b.dim(0) * b.dim(1) + 1);
for (int64_t row = 0; row < b.dim(0); ++row) {
for (int64_t col = 0; col < b.dim(1); ++col) {
b(row, col) = static_cast<float>(row + col + 1) / kMax;
}
}
auto la = LiteralUtil::CreateR2FromArray2D(a);
auto lb = LiteralUtil::CreateR2FromArray2D(b);
TF_ASSERT_OK_AND_ASSIGN(Literal lx, Execute(std::move(module), {&la, &lb}));
auto x_shape = lx.shape();
EXPECT_EQ(x_shape.dimensions_size(), 2);
EXPECT_EQ(x_shape.dimensions(0), b.dim(0));
EXPECT_EQ(x_shape.dimensions(1), b.dim(1));
Array2D<float> x(x_shape.dimensions(0), x_shape.dimensions(1));
x.SetValues(lx.data<float>());
auto ref_b = ReferenceUtil::MatmulArray2D(a, x);
auto ref_lb = LiteralUtil::CreateR2FromArray2D(*ref_b);
EXPECT_TRUE(
LiteralTestUtil::NearOrEqual(ref_lb, lb, ErrorSpec{0.001, 0.001}));
}
INSTANTIATE_TEST_CASE_P(TriangularExpanderTestInstances, TriangularExpanderTest,
::testing::Range(2, 256, 7));
}
} |
1,857 | cpp | tensorflow/tensorflow | reduce_decomposer | third_party/xla/xla/service/reduce_decomposer.cc | third_party/xla/xla/service/reduce_decomposer_test.cc | #ifndef XLA_SERVICE_REDUCE_DECOMPOSER_H_
#define XLA_SERVICE_REDUCE_DECOMPOSER_H_
#include <functional>
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class ReduceDecomposer : public HloModulePass {
public:
explicit ReduceDecomposer(HloPredicate custom_layout_allowed = nullptr)
: custom_layout_allowed_(custom_layout_allowed) {}
absl::string_view name() const override { return "reduce-decomposer"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
HloPredicate custom_layout_allowed_;
};
}
#endif
#include "xla/service/reduce_decomposer.h"
#include <functional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_creation_utils.h"
namespace xla {
namespace {
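// Makes all inputs of a variadic reduction share the layout of the first
// input by inserting copies where the layouts differ.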
class VariadicReductionLayoutEqualizer : public DfsHloRewriteVisitor {
public:
absl::Status HandleReduce(HloInstruction* hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
std::vector<HloInstruction*> new_inputs;
bool changed = false;
for (HloInstruction* input : reduce->inputs()) {
auto first_input = reduce->inputs()[0];
auto first_input_s = first_input->shape();
auto input_s = input->shape();
if (first_input_s.layout() != input_s.layout()) {
Shape new_input_s = ShapeUtil::MakeShapeWithDenseLayout(
input_s.element_type(), input_s.dimensions(),
first_input_s.layout().minor_to_major());
auto copy = MakeCopyHlo(input, new_input_s);
changed = true;
new_inputs.push_back(copy);
} else {
new_inputs.push_back(input);
}
}
if (changed) {
TF_ASSIGN_OR_RETURN(
auto new_reduce,
MakeReduceHlo(new_inputs, reduce->init_values(), reduce->dimensions(),
reduce->called_computations()[0]));
TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, new_reduce));
}
return absl::OkStatus();
}
};
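// Rewrites a reduce whose output layout does not match the layout induced by
// its operands into a reduce producing the induced layout followed by copies
// to the originally requested output shapes.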
class ReduceDecomposerVisitor : public DfsHloRewriteVisitor {
public:
explicit ReduceDecomposerVisitor(HloPredicate custom_layout_allowed)
: custom_layout_allowed_(std::move(custom_layout_allowed)) {}
absl::Status HandleReduce(HloInstruction* hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
auto shape = reduce->shape();
if (custom_layout_allowed_ && custom_layout_allowed_(reduce)) {
return absl::OkStatus();
}
std::vector<Shape> expected_shapes(reduce->input_count());
for (int i = 0; i < reduce->input_count(); i++) {
expected_shapes[i] = ExpectedOutputShape(reduce, i);
TF_RET_CHECK(reduce->inputs()[i]->shape().layout() ==
reduce->inputs()[0]->shape().layout());
}
std::vector<Shape> output_shapes;
if (shape.IsTuple()) {
for (int i = 0; i < shape.tuple_shapes_size(); i++) {
output_shapes.push_back(ShapeUtil::GetTupleElementShape(shape, i));
TF_RET_CHECK(output_shapes[i].layout() == output_shapes[0].layout());
}
} else {
output_shapes.push_back(shape);
}
TF_RET_CHECK(!output_shapes.empty());
if (ShapeUtil::MakeMaybeTupleShape(expected_shapes) !=
ShapeUtil::MakeMaybeTupleShape(output_shapes)) {
TF_ASSIGN_OR_RETURN(auto r_prime,
MakeReduceHlo(reduce->inputs(), reduce->init_values(),
reduce->dimensions(),
reduce->called_computations()[0]));
TF_RET_CHECK(r_prime->shape() ==
ShapeUtil::MakeMaybeTupleShape(expected_shapes));
if (!shape.IsTuple()) {
auto copy = MakeCopyHlo(r_prime, shape);
TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, copy));
return absl::OkStatus();
}
std::vector<HloInstruction*> copies;
for (int i = 0; i < reduce->input_count(); i++) {
TF_ASSIGN_OR_RETURN(auto from, GetOutput(r_prime, i));
auto copy = MakeCopyHlo(from, output_shapes[i]);
copies.push_back(copy);
}
auto out = MaybeMakeTuple(copies);
TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, out));
}
return absl::OkStatus();
}
private:
absl::StatusOr<HloInstruction*> GetOutput(HloInstruction* instr, int idx) {
if (instr->shape().IsTuple()) {
return MakeGetTupleElementHlo(instr, idx);
} else {
TF_RET_CHECK(idx == 0);
return instr;
}
}
Shape ExpectedOutputShape(HloReduceInstruction* reduce, int input_idx) {
Shape reduce_shape = reduce->shape();
auto output_shape = reduce_shape.IsTuple()
? reduce_shape.tuple_shapes(input_idx)
: reduce_shape;
auto* operand = reduce->inputs()[input_idx];
auto operand_shape = operand->shape();
return ShapeUtil::DeleteDimensions(reduce->dimensions(), operand_shape);
}
HloPredicate custom_layout_allowed_;
};
}
absl::StatusOr<bool> ReduceDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed1,
VariadicReductionLayoutEqualizer{}.RunOnModule(
module, execution_threads));
TF_ASSIGN_OR_RETURN(
bool changed2,
ReduceDecomposerVisitor{custom_layout_allowed_}.RunOnModule(
module, execution_threads));
return changed1 || changed2;
}
} | #include "xla/service/reduce_decomposer.h"
#include <functional>
#include <memory>
#include <optional>
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ReduceDecomposerTest : public HloTestBase {};
TEST_F(ReduceDecomposerTest, ReducePerformsTransposition) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = add(a, b)
}
ENTRY c {
p = f32[5,3,4]{2,1,0} parameter(0)
z = f32[] constant(0)
ROOT r = f32[5,4]{0,1} reduce(p, z), dimensions={1}, to_apply=add
}
)";
RunAndFilecheckHloRewrite(
hlo,
ReduceDecomposer{[&](const HloInstruction*) {
return true;
}},
std::nullopt);
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},
R"(
)");
}
TEST_F(ReduceDecomposerTest, ReduceNaturalLayout) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = add(a, b)
}
ENTRY c {
p = f32[5,3,4]{2,1,0} parameter(0)
z = f32[] constant(0)
ROOT r = reduce(p, z), dimensions={1}, to_apply=add
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}
TEST_F(ReduceDecomposerTest, VariadicReductionWithTranspose) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{0,1,2},
u32[2,3,4]{0,1,2}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},
R"(
)");
}
TEST_F(ReduceDecomposerTest, VariadicReductionDescendingLayout) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{2,1,0},
u32[2,3,4]{2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}
TEST_F(ReduceDecomposerTest, VariadicReductionInputsDifferentLayout) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
idxs = u32[2,3,4,1024]{2,1,3,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{2,1,0},
u32[2,3,4]{2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, R"(
)");
}
}
} |
1,858 | cpp | tensorflow/tensorflow | name_uniquer | third_party/xla/xla/service/name_uniquer.cc | third_party/xla/xla/service/name_uniquer_test.cc | #ifndef XLA_SERVICE_NAME_UNIQUER_H_
#define XLA_SERVICE_NAME_UNIQUER_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/types.h"
namespace xla {
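// Stateful helper that produces unique, HLO-friendly names by sanitizing a
// prefix and appending a numeric suffix when the prefix has been seen before.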
class NameUniquer {
public:
explicit NameUniquer(const std::string& separator = "__");
std::string GetUniqueName(absl::string_view prefix = "");
static std::string GetSanitizedName(absl::string_view name);
private:
class SequentialIdGenerator {
public:
SequentialIdGenerator() = default;
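    // Registers `id` if it is unused; otherwise returns the next free
    // sequential id.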
int64_t RegisterId(int64_t id) {
if (used_.insert(id).second) {
return id;
}
while (!used_.insert(next_).second) {
++next_;
}
return next_++;
}
private:
int64_t next_ = 0;
absl::flat_hash_set<int64_t> used_;
};
std::string separator_;
absl::flat_hash_map<std::string, SequentialIdGenerator> generated_names_;
NameUniquer(const NameUniquer&) = delete;
NameUniquer& operator=(const NameUniquer&) = delete;
};
}
#endif
#include "xla/service/name_uniquer.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "xla/primitive_util.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
bool IsAllowed(char character) {
auto c = static_cast<unsigned char>(character);
return (absl::ascii_isalnum(c) != 0) || c == '_' || c == '.' || c == '-';
}
}
NameUniquer::NameUniquer(const std::string& separator) {
CHECK(absl::c_all_of(separator, IsAllowed))
<< "separator should comprises allowed characters only";
separator_ = separator;
}
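// Replaces characters that are not allowed in HLO names with '_' and avoids
// clashes with primitive type names and the reserved "__" prefix.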
std::string NameUniquer::GetSanitizedName(absl::string_view name) {
if (name.empty()) {
return "";
}
std::string result(name);
char c = static_cast<unsigned char>(result[0]);
if (!absl::ascii_isalpha(c) && c != '_') {
result[0] = '_';
}
for (int i = 1, iter_limit = result.length(); i < iter_limit; i++) {
if (!IsAllowed(result[i])) {
result[i] = '_';
}
}
if (primitive_util::IsPrimitiveTypeName(result) && result != "tuple") {
result += "_";
}
if (absl::StartsWith(result, "__") && !absl::StartsWith(result, "__xla_")) {
result[0] = 'a';
}
return result;
}
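// Sanitizes `prefix`, splits off any existing numeric suffix after the
// separator, and appends a fresh suffix from the per-root id generator when
// needed to keep the result unique.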
std::string NameUniquer::GetUniqueName(absl::string_view prefix) {
std::string root =
GetSanitizedName(prefix.empty() ? "name" : std::string(prefix));
bool has_numeric_suffix = false;
int64_t numeric_suffix = 0;
size_t separator_index = root.rfind(separator_);
if (separator_index != std::string::npos && (separator_index > 0) &&
(separator_index < root.size() - 1)) {
std::string after_suffix = root.substr(separator_index + 1);
if (absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
has_numeric_suffix = true;
root = root.substr(0, separator_index);
} else {
numeric_suffix = 0;
}
}
SequentialIdGenerator& id_generator = generated_names_[root];
numeric_suffix = id_generator.RegisterId(numeric_suffix);
if (numeric_suffix == 0) {
return has_numeric_suffix ? absl::StrCat(root, separator_, 0) : root;
}
absl::StrAppend(&root, separator_, numeric_suffix);
return root;
}
} | #include "xla/service/name_uniquer.h"
#include <memory>
#include <utility>
#include <vector>
#include "tsl/platform/test.h"
namespace xla {
namespace {
class NameUniquerTest : public ::testing::Test {};
TEST_F(NameUniquerTest, SimpleUniquer) {
NameUniquer uniquer;
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo__1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo__2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar", uniquer.GetUniqueName("bar"));
EXPECT_EQ("foo__3", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar__1", uniquer.GetUniqueName("bar"));
EXPECT_EQ("qux", uniquer.GetUniqueName("qux"));
}
TEST_F(NameUniquerTest, DifferentSeparator) {
NameUniquer uniquer(".");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar", uniquer.GetUniqueName("bar"));
EXPECT_EQ("foo.3", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar"));
}
TEST_F(NameUniquerTest, NumericSuffixes) {
NameUniquer uniquer(".");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.55.1", uniquer.GetUniqueName("foo.55.1"));
EXPECT_EQ("foo.55.0", uniquer.GetUniqueName("foo.55.1"));
EXPECT_EQ("bar.1000", uniquer.GetUniqueName("bar.1000"));
EXPECT_EQ("bar.2000", uniquer.GetUniqueName("bar.2000"));
EXPECT_EQ("bar.-2000", uniquer.GetUniqueName("bar.-2000"));
EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar.1"));
}
TEST_F(NameUniquerTest, PrefixHasSuffix) {
NameUniquer uniquer(".");
EXPECT_EQ("foo.11.0", uniquer.GetUniqueName("foo.11.0"));
EXPECT_EQ("foo.11", uniquer.GetUniqueName("foo.11"));
}
TEST_F(NameUniquerTest, Sanitize) {
NameUniquer uniquer("_");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo_1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
EXPECT_EQ("foo_54", uniquer.GetUniqueName("foo_54"));
EXPECT_EQ("foo_54.1", uniquer.GetUniqueName("foo_54.1"));
EXPECT_EQ("foo_2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar_1000", uniquer.GetUniqueName("bar<1000"));
EXPECT_EQ("bar_2000", uniquer.GetUniqueName("bar<2000"));
EXPECT_EQ("bar_1", uniquer.GetUniqueName("bar_1"));
EXPECT_EQ("_10", uniquer.GetUniqueName(
".10"));
EXPECT_EQ("_10_1", uniquer.GetUniqueName(".10"));
EXPECT_EQ("_10_2", uniquer.GetUniqueName("_10"));
EXPECT_EQ("foobar_", uniquer.GetUniqueName("foobar_"));
EXPECT_EQ("foobar__1", uniquer.GetUniqueName("foobar_"));
}
TEST_F(NameUniquerTest, KeepNamesInRandomOrder) {
NameUniquer uniquer(".");
EXPECT_EQ("foo.11", uniquer.GetUniqueName("foo.11"));
EXPECT_EQ("foo.10", uniquer.GetUniqueName("foo.10"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo.1"));
EXPECT_EQ("foo.12", uniquer.GetUniqueName("foo.12"));
EXPECT_EQ("foo.3", uniquer.GetUniqueName("foo.3"));
}
TEST_F(NameUniquerTest, AvoidKeywords) {
NameUniquer uniquer(".");
EXPECT_EQ("f32_", uniquer.GetUniqueName("f32"));
EXPECT_EQ("s64_", uniquer.GetUniqueName("s64"));
EXPECT_EQ("pred_", uniquer.GetUniqueName("pred"));
EXPECT_NE(uniquer.GetUniqueName("__xla_").find("__xla_"), std::string::npos);
EXPECT_EQ(uniquer.GetUniqueName("__abx").find("__"), std::string::npos);
EXPECT_EQ("tuple", uniquer.GetUniqueName("tuple"));
EXPECT_EQ("F32", uniquer.GetUniqueName("F32"));
EXPECT_EQ("S32", uniquer.GetUniqueName("S32"));
EXPECT_EQ("Pred", uniquer.GetUniqueName("Pred"));
}
}
} |
1,859 | cpp | tensorflow/tensorflow | float_normalization | third_party/xla/xla/service/float_normalization.cc | third_party/xla/xla/service/float_normalization_test.cc | #ifndef XLA_SERVICE_FLOAT_NORMALIZATION_H_
#define XLA_SERVICE_FLOAT_NORMALIZATION_H_
#include <string>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
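// Pass that inserts conversions between a low-precision type (e.g. BF16) and
// its high-precision counterpart around instructions that the backend's
// FloatSupport reports as unsupported in low or mixed precision.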
class FloatNormalization : public HloModulePass {
public:
explicit FloatNormalization(const FloatSupport* float_support)
: float_support_(float_support),
name_("float-normalization-" +
primitive_util::LowercasePrimitiveTypeName(
float_support_->LowPrecisionType())) {}
~FloatNormalization() override = default;
absl::string_view name() const override { return name_; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const FloatSupport* float_support_;
std::string name_;
};
class BFloat16MixedPrecisionRemoval : public HloModulePass {
public:
BFloat16MixedPrecisionRemoval() = default;
~BFloat16MixedPrecisionRemoval() override = default;
absl::string_view name() const override {
return "bf16-mixed-precision-removal";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
FloatNormalization normalization(&no_mixed_precision_support_);
return normalization.Run(module, execution_threads);
}
private:
class BFloat16SupportForMixedPrecisionRemoval : public FloatSupport {
public:
BFloat16SupportForMixedPrecisionRemoval() : FloatSupport(BF16) {}
~BFloat16SupportForMixedPrecisionRemoval() override = default;
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return true;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return true;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return false;
}
} no_mixed_precision_support_;
};
}
#endif
#include "xla/service/float_normalization.h"
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
class FloatNormalizationVisitor : public DfsHloVisitorWithDefault {
public:
explicit FloatNormalizationVisitor(const FloatSupport* float_support,
FloatNormalization* float_normalization)
: computation_(nullptr),
float_support_(float_support),
float_normalization_(float_normalization) {}
bool changed() const { return changed_; }
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status Preprocess(HloInstruction* hlo) override;
private:
absl::Status HandleInstruction(HloInstruction* hlo);
absl::Status HandleMultipleOutputs(HloInstruction* hlo);
absl::StatusOr<HloInstruction*> ConvertType(HloInstruction* hlo,
PrimitiveType from,
PrimitiveType to,
HloComputation* computation);
absl::Status InsertConvertAfterOutput(HloInstruction* hlo, PrimitiveType from,
PrimitiveType to,
HloComputation* computation);
absl::Status ChangeOutputTypeThenInsertConvertBack(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation);
absl::Status InsertConvertBeforeOperand(HloInstruction* hlo,
int64_t operand_idx,
PrimitiveType from, PrimitiveType to,
HloComputation* computation);
absl::Status ConvertCalledComputations(
HloInstruction* hlo,
absl::Span<HloComputation* const> low_precision_called_comps);
PrimitiveType LowPrecisionType() const {
return float_support_->LowPrecisionType();
}
PrimitiveType HighPrecisionType() const {
return float_support_->HighPrecisionType();
}
HloComputation* computation_;
const FloatSupport* float_support_;
FloatNormalization* float_normalization_;
bool changed_ = false;
};
int64_t CountSubshapesWithMatchingType(const Shape& shape, PrimitiveType type) {
int64_t count = 0;
ShapeUtil::ForEachSubshape(
shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() == type) {
++count;
}
});
return count;
}
int64_t ShapeLeafCount(const Shape& shape) {
int64_t count = 0;
ShapeUtil::ForEachSubshape(
shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (ShapeUtil::IsLeafIndex(shape, index)) {
++count;
}
});
return count;
}
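// Returns a value equivalent to `hlo` with every subshape of type `from`
// converted to `to`; reuses the operand of an existing high-to-low precision
// convert when possible.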
absl::StatusOr<HloInstruction*> FloatNormalizationVisitor::ConvertType(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation) {
if (CountSubshapesWithMatchingType(hlo->shape(), from) == 0) {
return hlo;
}
if (hlo->opcode() == HloOpcode::kConvert &&
hlo->operand(0)->shape().element_type() == to &&
to == LowPrecisionType() && from == HighPrecisionType()) {
return hlo->mutable_operand(0);
}
TF_ASSIGN_OR_RETURN(
auto new_hlo,
computation->DeepCopyInstructionWithCustomCopier(
hlo, [&](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
const auto& original_subshape =
ShapeUtil::GetSubshape(hlo->shape(), leaf_index);
if (original_subshape.element_type() != from) {
return leaf;
}
auto new_subshape =
ShapeUtil::ChangeElementType(original_subshape, to);
float_normalization_->UpdateLayout(&new_subshape);
return computation->AddInstruction(
HloInstruction::CreateConvert(new_subshape, leaf));
}));
return new_hlo;
}
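// Converts the output of `hlo` from `from` to `to` and rewires all existing
// users (and the computation root, if applicable) to the converted value.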
absl::Status FloatNormalizationVisitor::InsertConvertAfterOutput(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation) {
bool is_root = computation->root_instruction() == hlo;
std::vector<HloInstruction*> materialized_users = hlo->users();
TF_ASSIGN_OR_RETURN(auto new_hlo, ConvertType(hlo, from, to, computation));
if (new_hlo == hlo) {
return absl::OkStatus();
}
for (auto* user : materialized_users) {
TF_RETURN_IF_ERROR(hlo->ReplaceUseWithDifferentShape(user, new_hlo));
}
if (is_root) {
computation->set_root_instruction(new_hlo, true);
}
changed_ = true;
return absl::OkStatus();
}
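// Rewrites the output type of `hlo` from `from` to `to` in place, then inserts
// converts back to the original type for users that still expect it.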
absl::Status FloatNormalizationVisitor::ChangeOutputTypeThenInsertConvertBack(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation) {
auto original_shape = hlo->shape();
if (CountSubshapesWithMatchingType(original_shape, from) == 0) {
return absl::OkStatus();
}
ShapeUtil::ForEachMutableSubshape(
hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {
if (subshape->element_type() == from) {
subshape->set_element_type(to);
}
});
float_normalization_->UpdateLayout(hlo->mutable_shape());
bool is_root = computation->root_instruction() == hlo;
std::vector<HloInstruction*> materialized_users = hlo->users();
TF_ASSIGN_OR_RETURN(
auto new_hlo,
computation->DeepCopyInstructionWithCustomCopier(
hlo, [&](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
const auto& original_subshape =
ShapeUtil::GetSubshape(original_shape, leaf_index);
if (original_subshape.element_type() ==
leaf->shape().element_type()) {
return leaf;
}
return computation->AddInstruction(
HloInstruction::CreateConvert(original_subshape, leaf));
}));
std::vector<HloInstruction*> conversions_to_simplify;
for (auto* user : materialized_users) {
if (user->opcode() == HloOpcode::kConvert &&
user->shape().element_type() == to && to == HighPrecisionType() &&
from == LowPrecisionType()) {
conversions_to_simplify.emplace_back(user);
} else {
TF_RETURN_IF_ERROR(hlo->ReplaceUseWithDifferentShape(user, new_hlo));
}
}
for (auto* convert : conversions_to_simplify) {
TF_RETURN_IF_ERROR(convert->ReplaceAllUsesWith(hlo));
}
if (is_root) {
computation->set_root_instruction(new_hlo, true);
}
changed_ = true;
return absl::OkStatus();
}
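// Inserts a conversion of operand `operand_idx` of `hlo` from `from` to `to`.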
absl::Status FloatNormalizationVisitor::InsertConvertBeforeOperand(
HloInstruction* hlo, int64_t operand_idx, PrimitiveType from,
PrimitiveType to, HloComputation* computation) {
auto operand = hlo->mutable_operand(operand_idx);
TF_ASSIGN_OR_RETURN(auto new_operand,
ConvertType(operand, from, to, computation));
if (new_operand == operand) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
hlo->ReplaceOperandWithDifferentShape(operand_idx, new_operand));
changed_ = true;
return absl::OkStatus();
}
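// Clones the given low-precision called computations and converts their roots
// and parameters to the high-precision type, leaving other callers untouched.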
absl::Status FloatNormalizationVisitor::ConvertCalledComputations(
HloInstruction* hlo,
absl::Span<HloComputation* const> low_precision_called_comps) {
absl::flat_hash_map<HloComputation*, HloComputation*> cloned_computations;
for (auto& comp : low_precision_called_comps) {
auto cloned = comp->parent()->AddEmbeddedComputation(comp->Clone());
cloned_computations[comp] = cloned;
changed_ = true;
}
hlo->ReplaceCalledComputations([&](HloComputation* comp) {
auto it = cloned_computations.find(comp);
if (it != cloned_computations.end()) {
return it->second;
}
return comp;
});
for (auto& comp_pair : cloned_computations) {
auto comp = comp_pair.second;
TF_RETURN_IF_ERROR(InsertConvertAfterOutput(comp->root_instruction(),
LowPrecisionType(),
HighPrecisionType(), comp));
for (auto* param : comp->parameter_instructions()) {
TF_RETURN_IF_ERROR(ChangeOutputTypeThenInsertConvertBack(
param, LowPrecisionType(), HighPrecisionType(), comp));
}
}
return absl::OkStatus();
}
bool ShouldAvoidNormalizingComputationsForInstruction(HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kAllReduce ||
hlo->opcode() == HloOpcode::kReduceScatter;
}
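// Handles tuple-output ops (sort, all-reduce, reduce-scatter): converts
// unsupported low-precision operands and outputs and fixes up users via
// get-tuple-element/convert pairs.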
absl::Status FloatNormalizationVisitor::HandleMultipleOutputs(
HloInstruction* hlo) {
std::vector<PrimitiveType> operand_types(hlo->operand_count());
std::vector<PrimitiveType> output_types(hlo->operand_count());
int64_t high_prec_count = 0;
int64_t low_prec_count = 0;
bool has_unsupported_low_prec_operand = false;
bool has_unsupported_low_prec_output = false;
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
CHECK(hlo->operand(i)->shape().IsArray());
CHECK(ShapeUtil::GetSubshape(hlo->shape(), {i}).IsArray());
operand_types[i] = hlo->operand(i)->shape().element_type();
output_types[i] = ShapeUtil::GetSubshape(hlo->shape(), {i}).element_type();
if (operand_types[i] == HighPrecisionType()) {
high_prec_count += 1;
} else if (operand_types[i] == LowPrecisionType()) {
low_prec_count += 1;
if (!float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
has_unsupported_low_prec_operand = true;
}
}
if (output_types[i] == HighPrecisionType()) {
high_prec_count += 1;
} else if (output_types[i] == LowPrecisionType()) {
low_prec_count += 1;
if (!float_support_->SupportsLowPrecisionOutput(*hlo)) {
has_unsupported_low_prec_output = true;
}
}
}
if (low_prec_count == 0) {
return absl::OkStatus();
}
auto should_convert_operand = [&](int64_t i) {
if (operand_types[i] != LowPrecisionType()) {
return false;
}
if (!float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
return true;
}
if (float_support_->SupportsMixedPrecisions(*hlo)) {
return false;
}
return has_unsupported_low_prec_operand ||
has_unsupported_low_prec_output || high_prec_count > 0;
};
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
if (should_convert_operand(i)) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, LowPrecisionType(), HighPrecisionType(), computation_));
high_prec_count += 1;
low_prec_count -= 1;
}
}
if (!has_unsupported_low_prec_output &&
(float_support_->SupportsMixedPrecisions(*hlo) || high_prec_count == 0 ||
low_prec_count == 0)) {
return absl::OkStatus();
}
std::vector<HloComputation*> low_precision_called_comps;
for (auto* comp : hlo->called_computations()) {
if (ShouldAvoidNormalizingComputationsForInstruction(hlo)) {
continue;
}
bool comp_has_low_precision = false;
if (comp->root_instruction()->shape().element_type() ==
HighPrecisionType()) {
high_prec_count += 1;
} else if (comp->root_instruction()->shape().element_type() ==
LowPrecisionType()) {
low_prec_count += 1;
comp_has_low_precision = true;
}
for (auto* param : comp->parameter_instructions()) {
if (param->shape().element_type() == HighPrecisionType()) {
high_prec_count += 1;
} else if (param->shape().element_type() == LowPrecisionType()) {
low_prec_count += 1;
comp_has_low_precision = true;
}
}
if (comp_has_low_precision) {
low_precision_called_comps.push_back(comp);
}
}
std::vector<HloInstruction*> materialized_users = hlo->users();
std::vector<HloInstruction*> output_elements(hlo->operand_count());
auto original_shape = hlo->shape();
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});
if (output_types[i] != LowPrecisionType()) {
output_elements[i] = computation_->AddInstruction(
HloInstruction::CreateGetTupleElement(*subshape, hlo, i));
continue;
}
subshape->set_element_type(HighPrecisionType());
float_normalization_->UpdateLayout(subshape);
auto gte = computation_->AddInstruction(
HloInstruction::CreateGetTupleElement(*subshape, hlo, i));
auto shape = ShapeUtil::ChangeElementType(*subshape, LowPrecisionType());
float_normalization_->UpdateLayout(&shape);
output_elements[i] =
computation_->AddInstruction(HloInstruction::CreateConvert(shape, gte));
}
auto tuple = computation_->AddInstruction(
HloInstruction::CreateTuple(output_elements));
*tuple->mutable_shape() = hlo->shape();
for (auto* user : materialized_users) {
TF_RETURN_IF_ERROR(hlo->ReplaceUseWith(user, tuple));
}
bool is_root = computation_->root_instruction() == hlo;
if (is_root) {
computation_->set_root_instruction(tuple);
}
*tuple->mutable_shape() = original_shape;
return ConvertCalledComputations(hlo, low_precision_called_comps);
}
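// Handles a single-output instruction: converts unsupported low-precision
// operands and outputs, and falls back to full high precision when mixed
// precision is not supported.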
absl::Status FloatNormalizationVisitor::HandleInstruction(HloInstruction* hlo) {
int high_prec_count = 0;
int low_prec_count = 0;
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
high_prec_count += CountSubshapesWithMatchingType(hlo->operand(i)->shape(),
HighPrecisionType());
low_prec_count += CountSubshapesWithMatchingType(hlo->operand(i)->shape(),
LowPrecisionType());
}
high_prec_count +=
CountSubshapesWithMatchingType(hlo->shape(), HighPrecisionType());
low_prec_count +=
CountSubshapesWithMatchingType(hlo->shape(), LowPrecisionType());
std::vector<HloComputation*> low_precision_called_comps;
for (auto* comp : hlo->called_computations()) {
if (ShouldAvoidNormalizingComputationsForInstruction(hlo)) {
continue;
}
bool comp_has_low_precision = false;
high_prec_count += CountSubshapesWithMatchingType(
comp->root_instruction()->shape(), HighPrecisionType());
int64_t low_prec_count_comp_root = CountSubshapesWithMatchingType(
comp->root_instruction()->shape(), LowPrecisionType());
if (low_prec_count_comp_root > 0) {
low_prec_count += low_prec_count_comp_root;
comp_has_low_precision = true;
}
for (auto* param : comp->parameter_instructions()) {
high_prec_count +=
CountSubshapesWithMatchingType(param->shape(), HighPrecisionType());
int64_t low_prec_count_comp_param =
CountSubshapesWithMatchingType(param->shape(), LowPrecisionType());
if (low_prec_count_comp_param > 0) {
low_prec_count += low_prec_count_comp_param;
comp_has_low_precision = true;
}
}
if (comp_has_low_precision) {
low_precision_called_comps.push_back(comp);
}
}
for (int i = 0; i < hlo->operand_count(); ++i) {
int64_t low_prec_count_in_operand = CountSubshapesWithMatchingType(
hlo->operand(i)->shape(), LowPrecisionType());
if (low_prec_count_in_operand > 0 &&
!float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, LowPrecisionType(), HighPrecisionType(), computation_));
low_prec_count -= low_prec_count_in_operand;
high_prec_count += low_prec_count_in_operand;
}
}
if (!float_support_->SupportsLowPrecisionOutput(*hlo)) {
int64_t low_prec_count_in_hlo =
CountSubshapesWithMatchingType(hlo->shape(), LowPrecisionType());
if (low_prec_count_in_hlo > 0) {
TF_RETURN_IF_ERROR(ChangeOutputTypeThenInsertConvertBack(
hlo, LowPrecisionType(), HighPrecisionType(), computation_));
low_prec_count -= low_prec_count_in_hlo;
high_prec_count += low_prec_count_in_hlo;
}
}
if (float_support_->SupportsMixedPrecisions(*hlo) || low_prec_count == 0 ||
high_prec_count == 0) {
return absl::OkStatus();
}
if (hlo->called_computations().empty() &&
CountSubshapesWithMatchingType(hlo->shape(), LowPrecisionType()) ==
ShapeLeafCount(hlo->shape())) {
bool can_use_low_prec = true;
for (int i = 0; i < hlo->operand_count(); ++i) {
if (CountSubshapesWithMatchingType(hlo->operand(i)->shape(),
LowPrecisionType()) ==
ShapeLeafCount(hlo->operand(i)->shape())) {
continue;
}
if ((float_support_->EffectiveOperandPrecisionIsLowPrecision(*hlo, i) ||
float_support_->EffectiveOperandPrecisionIsOutputPrecision(*hlo,
i)) &&
float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
continue;
}
can_use_low_prec = false;
break;
}
if (can_use_low_prec) {
for (int i = 0; i < hlo->operand_count(); ++i) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, HighPrecisionType(), LowPrecisionType(), computation_));
}
return absl::OkStatus();
}
}
TF_RETURN_IF_ERROR(ChangeOutputTypeThenInsertConvertBack(
hlo, LowPrecisionType(), HighPrecisionType(), computation_));
for (int i = 0; i < hlo->operand_count(); ++i) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, LowPrecisionType(), HighPrecisionType(), computation_));
}
return ConvertCalledComputations(hlo, low_precision_called_comps);
}
absl::Status FloatNormalizationVisitor::DefaultAction(HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kTuple ||
hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kDomain ||
hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kFusion ||
hlo->opcode() == HloOpcode::kConvert ||
hlo->opcode() == HloOpcode::kCall ||
hlo->opcode() == HloOpcode::kCustomCall ||
hlo->opcode() == HloOpcode::kWhile ||
hlo->opcode() == HloOpcode::kConditional ||
hlo->opcode() == HloOpcode::kBitcastConvert ||
hlo->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
if ((hlo->opcode() == HloOpcode::kSort ||
hlo->opcode() == HloOpcode::kAllReduce ||
hlo->opcode() == HloOpcode::kReduceScatter) &&
hlo->shape().IsTuple()) {
return HandleMultipleOutputs(hlo);
}
return HandleInstruction(hlo);
}
absl::Status FloatNormalizationVisitor::Preprocess(HloInstruction* hlo) {
computation_ = hlo->parent();
return absl::OkStatus();
}
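// Returns the set of computations that should not be normalized because they
// are called by all-reduce/reduce-scatter instructions whose apply
// computations must keep their types; computations shared with normalizing
// callers are cloned so the non-normalizing callers get their own copy.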
absl::flat_hash_set<HloComputation*>
CloneComputationsForNonNormalizingInstructions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module, execution_threads);
absl::flat_hash_set<HloComputation*> computations_to_skip;
for (const CallGraphNode& node : call_graph->nodes()) {
bool has_normalizing_users = false;
bool has_users_to_skip_normalization = false;
for (const CallSite& site : node.caller_callsites()) {
if (ShouldAvoidNormalizingComputationsForInstruction(
site.instruction())) {
has_users_to_skip_normalization = true;
} else {
has_normalizing_users = true;
}
}
if (!has_users_to_skip_normalization) {
continue;
}
if (!has_normalizing_users) {
computations_to_skip.insert(node.computation());
continue;
}
HloComputation* clone = module->DeepCloneComputation(node.computation());
for (const CallSite& site : node.caller_callsites()) {
if (ShouldAvoidNormalizingComputationsForInstruction(
site.instruction())) {
site.instruction()->ReplaceCalledComputations(
[&](HloComputation* called) {
return called == node.computation() ? clone : called;
});
}
}
computations_to_skip.insert(clone);
}
return computations_to_skip;
}
}
absl::StatusOr<bool> FloatNormalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "FloatNormalization::Run() for " +
primitive_util::LowercasePrimitiveTypeName(
float_support_->LowPrecisionType()) +
", before:\n" + module->ToString());
auto computations_to_visit =
module->MakeComputationPostOrder(execution_threads);
auto computations_to_skip =
CloneComputationsForNonNormalizingInstructions(module, execution_threads);
FloatNormalizationVisitor visitor(float_support_, this);
for (auto* comp : computations_to_visit) {
if (computations_to_skip.contains(comp)) continue;
TF_RETURN_IF_ERROR(comp->Accept(&visitor));
}
XLA_VLOG_LINES(2, "FloatNormalization::Run() for " +
primitive_util::LowercasePrimitiveTypeName(
float_support_->LowPrecisionType()) +
", after:\n" + module->ToString());
if (visitor.changed()) {
TupleSimplifier tuple_simplifier;
TF_RETURN_IF_ERROR(tuple_simplifier.Run(module).status());
HloDCE dce;
TF_RETURN_IF_ERROR(dce.Run(module).status());
}
return visitor.changed();
}
} | #include "xla/service/float_normalization.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
class TestFloatSupport : public FloatSupport {
public:
explicit TestFloatSupport(PrimitiveType low_precision_type,
PrimitiveType high_precision_type)
: FloatSupport(low_precision_type, high_precision_type) {}
~TestFloatSupport() override = default;
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll) {
return true;
}
if (hlo.opcode() == HloOpcode::kDot) {
return operand_index == 0;
}
return false;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd || hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kDot || hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll) {
return true;
}
return false;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd || hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement) {
return true;
}
return false;
}
};
class TestFloatNoComputeSupport : public FloatSupport {
public:
explicit TestFloatNoComputeSupport(PrimitiveType low_precision_type,
PrimitiveType high_precision_type)
: FloatSupport(low_precision_type, high_precision_type) {}
~TestFloatNoComputeSupport() override = default;
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
if (hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll ||
hlo.opcode() == HloOpcode::kAllReduce ||
hlo.opcode() == HloOpcode::kReduceScatter) {
return true;
}
return false;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll ||
hlo.opcode() == HloOpcode::kAllReduce ||
hlo.opcode() == HloOpcode::kReduceScatter) {
return true;
}
return false;
}
};
class FloatNormalizationTest : public HloTestBase {
protected:
FloatNormalizationTest()
: HloTestBase(false,
true) {}
bool Normalize(HloModule* module, PrimitiveType low_precision_type = BF16,
PrimitiveType high_precision_type = F32) {
TestFloatSupport float_support(low_precision_type, high_precision_type);
FloatNormalization normalization(&float_support);
absl::StatusOr<bool> result = normalization.Run(module);
EXPECT_IS_OK(result.status());
HloVerifier verifier(false,
true);
EXPECT_IS_OK(verifier.Run(module).status());
return result.value();
}
};
TEST_F(FloatNormalizationTest, NoopIfSupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, add0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction(), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveIfUnsupportedBF16) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* mul0 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kMultiply, a, b));
HloInstruction* mul1 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kMultiply, mul0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(computation->root_instruction()->operand(0), mul1);
EXPECT_EQ(mul0->shape().element_type(), F32);
EXPECT_EQ(mul1->shape().element_type(), F32);
EXPECT_EQ(mul1->operand(0)->opcode(), HloOpcode::kConvert);
}
TEST_F(FloatNormalizationTest, ResolveUnsupportedMixedPrecisionSubtraction) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* sub0 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kSubtract, a, b));
HloInstruction* sub1 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kSubtract, sub0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(computation->root_instruction()->operand(0), sub1);
EXPECT_EQ(sub0->shape().element_type(), F32);
EXPECT_EQ(sub1->shape().element_type(), F32);
EXPECT_EQ(sub1->operand(0)->opcode(), HloOpcode::kConvert);
}
TEST_F(FloatNormalizationTest, ResolveUnsupportedMixedPrecisionReduce) {
Shape f32_input_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape f32_output_shape = ShapeUtil::MakeShape(F32, {4});
Shape bf16_scalar_shape = ShapeUtil::MakeShape(BF16, {});
auto reduce_comp_builder = HloComputation::Builder("reduce_comp");
auto reduce_comp_param0 = reduce_comp_builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_scalar_shape, "param0"));
auto reduce_comp_param1 = reduce_comp_builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_scalar_shape, "param1"));
reduce_comp_builder.AddInstruction(
HloInstruction::CreateBinary(bf16_scalar_shape, HloOpcode::kAdd,
reduce_comp_param0, reduce_comp_param1));
auto module = CreateNewVerifiedModule();
auto reduce_computation =
module->AddEmbeddedComputation(reduce_comp_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_input_shape, "a"));
HloInstruction* init = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_scalar_shape, "init"));
HloInstruction* reduce = builder.AddInstruction(HloInstruction::CreateReduce(
f32_output_shape, input, init, {0}, reduce_computation));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction(), reduce);
EXPECT_EQ(reduce->called_computations().size(), 1);
EXPECT_EQ(reduce->called_computations()[0]->num_parameters(), 2);
EXPECT_EQ(reduce->called_computations()[0]
->parameter_instruction(0)
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->called_computations()[0]
->parameter_instruction(1)
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->called_computations()[0]
->root_instruction()
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->shape().element_type(), F32);
EXPECT_EQ(reduce->operand(0), input);
EXPECT_EQ(input->shape().element_type(), F32);
EXPECT_EQ(reduce->operand(1)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(reduce->operand(1)->shape().element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleAllReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* crs = builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({f32_shape, bf16_shape}), {a, b}, reduction,
CollectiveDeviceList(),
false,
std::nullopt,
false));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(bf16_shape, crs, 1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(1)->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {1}).element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleAllToAllToBF16) {
auto module = CreateNewVerifiedModule(TestName(), 2);
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
std::vector<ReplicaGroup> replica_groups(1);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(1);
HloInstruction* a2a = builder.AddInstruction(HloInstruction::CreateAllToAll(
ShapeUtil::MakeTupleShape({bf16_shape, bf16_shape}), {a, a},
CollectiveDeviceList(replica_groups), false,
std::nullopt));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction(), a2a);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {0}).element_type(), BF16);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {1}).element_type(), BF16);
EXPECT_EQ(a2a->operand(0)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(a2a->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(a2a->operand(1)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(a2a->operand(1)->shape().element_type(), BF16);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleAllToAllToF32) {
auto module = CreateNewVerifiedModule(TestName(), 2);
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
std::vector<ReplicaGroup> replica_groups(1);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(1);
HloInstruction* a2a = builder.AddInstruction(HloInstruction::CreateAllToAll(
ShapeUtil::MakeTupleShape({bf16_shape, f32_shape}), {a, a},
CollectiveDeviceList(replica_groups), false,
std::nullopt));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kTuple);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {0}).element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {1}).element_type(), F32);
EXPECT_EQ(a2a->operand(0)->opcode(), HloOpcode::kParameter);
EXPECT_EQ(a2a->operand(0)->shape().element_type(), F32);
EXPECT_EQ(a2a->operand(1)->opcode(), HloOpcode::kParameter);
EXPECT_EQ(a2a->operand(1)->shape().element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleSort) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {1024});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {1024});
Shape s32_shape = ShapeUtil::MakeShape(BF16, {1024});
HloInstruction* key = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "key"));
HloInstruction* value = builder.AddInstruction(
HloInstruction::CreateParameter(1, s32_shape, "value"));
TF_ASSERT_OK_AND_ASSIGN(
auto* sort,
MakeSortHlo(ShapeUtil::MakeTupleShape({bf16_shape, s32_shape}),
{key, value}, 0, false, &builder,
module.get()));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(bf16_shape, sort, 0));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(sort->operand(0)->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(sort->shape(), {0}).element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleSortRoot) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {1024});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {1024});
HloInstruction* key = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "key"));
HloInstruction* value = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "value"));
TF_ASSERT_OK_AND_ASSIGN(
auto* sort,
MakeSortHlo(ShapeUtil::MakeTupleShape({bf16_shape, f32_shape}),
{key, value}, 0, false, &builder,
module.get()));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(sort->operand(0)->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(sort->shape(), {0}).element_type(), F32);
EXPECT_NE(computation->root_instruction(), sort);
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kTuple);
EXPECT_EQ(sort->to_apply()->parameter_instruction(1)->shape().element_type(),
F32);
auto users = sort->to_apply()->parameter_instruction(1)->users();
for (auto user : users) {
EXPECT_NE(user->opcode(), HloOpcode::kConvert);
}
}
TEST_F(FloatNormalizationTest, DoNotAddUnsupportedMixedPrecision) {
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {4, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
HloInstruction* dot = builder.AddInstruction(
HloInstruction::CreateDot(bf16_shape, a, b, dot_dnums, precision_config));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(dot->shape().element_type(), F32);
EXPECT_EQ(dot->operand(0)->shape().element_type(), F32);
EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(dot->operand(1)->shape().element_type(), F32);
EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kConvert);
}
TEST_F(FloatNormalizationTest, DoNotChangeBitcastConvert) {
auto builder = HloComputation::Builder(TestName());
Shape u16_shape = ShapeUtil::MakeShape(U16, {4, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {4, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, u16_shape, "a"));
builder.AddInstruction(HloInstruction::CreateBitcastConvert(bf16_shape, a));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
auto root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kBitcastConvert);
EXPECT_EQ(root->shape().element_type(), BF16);
EXPECT_EQ(root->operand(0)->shape().element_type(), U16);
}
TEST_F(FloatNormalizationTest, ResolveIfUnsupportedF8e5m2) {
auto builder = HloComputation::Builder(TestName());
Shape f16_shape = ShapeUtil::MakeShape(F16, {2, 4});
Shape f8_shape = ShapeUtil::MakeShape(F8E5M2, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f16_shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, f8_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f16_shape, "c"));
HloInstruction* mul0 = builder.AddInstruction(
HloInstruction::CreateBinary(f8_shape, HloOpcode::kMultiply, a, b));
HloInstruction* mul1 = builder.AddInstruction(
HloInstruction::CreateBinary(f8_shape, HloOpcode::kMultiply, mul0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get(), F8E5M2, F16));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(computation->root_instruction()->operand(0), mul1);
EXPECT_EQ(mul0->shape().element_type(), F16);
EXPECT_EQ(mul1->shape().element_type(), F16);
EXPECT_EQ(mul1->operand(0)->opcode(), HloOpcode::kConvert);
}
class FloatNormalizationNoComputeSupportTest : public FloatNormalizationTest {
protected:
bool Normalize(HloModule* module, PrimitiveType low_precision_type = BF16,
PrimitiveType high_precision_type = F32) {
TestFloatNoComputeSupport float_support(low_precision_type,
high_precision_type);
FloatNormalization normalization(&float_support);
absl::StatusOr<bool> result = normalization.Run(module);
EXPECT_IS_OK(result.status());
HloVerifier verifier(false,
true);
EXPECT_IS_OK(verifier.Run(module).status());
return result.value();
}
};
TEST_F(FloatNormalizationNoComputeSupportTest,
NoNormalizationForToApplyMultiOutputAllReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
Shape bf16_shape_b = ShapeUtil::MakeShape(BF16, {16, 16});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape_b, "b"));
HloInstruction* crs = builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({bf16_shape_a, bf16_shape_b}), {a, b},
reduction,
CollectiveDeviceList(),
false,
std::nullopt,
false));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(bf16_shape_b, crs, 1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(1)->shape().element_type(), BF16);
EXPECT_EQ(crs->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {1}).element_type(), BF16);
}
TEST_F(FloatNormalizationNoComputeSupportTest,
NormalizationClonesSharedApplyAllReduceAndReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
Shape bf16_shape_b = ShapeUtil::MakeShape(BF16, {2, 4, 2});
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape_b, "b"));
Shape bf16_scalar_shape = ShapeUtil::MakeShape(BF16, {});
HloInstruction* init = builder.AddInstruction(
HloInstruction::CreateParameter(2, bf16_scalar_shape, "init"));
HloInstruction* all_reduce = builder.AddInstruction(
HloInstruction::CreateAllReduce(bf16_shape_a, {a}, reduction,
CollectiveDeviceList(),
false,
std::nullopt,
false));
HloInstruction* reduce = builder.AddInstruction(
HloInstruction::CreateReduce(bf16_shape_a, b, init, {2}, reduction));
builder.AddInstruction(HloInstruction::CreateBinary(
bf16_shape_a, HloOpcode::kAdd, all_reduce, reduce));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(all_reduce->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(all_reduce->to_apply()->root_instruction()->opcode(),
HloOpcode::kAdd);
EXPECT_EQ(all_reduce->to_apply()->root_instruction()->shape().element_type(),
BF16);
EXPECT_EQ(reduce->called_computations().size(), 1);
EXPECT_EQ(reduce->called_computations()[0]
->root_instruction()
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->called_computations()[0]->root_instruction()->opcode(),
HloOpcode::kConvert);
EXPECT_EQ(reduce->shape().element_type(), F32);
}
TEST_F(FloatNormalizationNoComputeSupportTest,
NoNormalizationForToApplyAllReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
HloInstruction* crs = builder.AddInstruction(
HloInstruction::CreateAllReduce(bf16_shape_a, {a}, reduction,
CollectiveDeviceList(),
false,
std::nullopt,
false));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(crs->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
}
TEST_F(FloatNormalizationNoComputeSupportTest,
NoNormalizationForToApplyReduceScatter) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
Shape bf16_shape_scattered = ShapeUtil::MakeShape(BF16, {1, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
HloInstruction* crs =
builder.AddInstruction(HloInstruction::CreateReduceScatter(
bf16_shape_scattered, {a}, reduction,
CollectiveDeviceList(),
false,
std::nullopt,
false, 0));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(crs->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
}
TEST_F(FloatNormalizationTest, ConvertBeforeTuple) {
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kMultiply, a, b));
HloInstruction* convert =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, add));
builder.AddInstruction(HloInstruction::CreateVariadic(
ShapeUtil::MakeTupleShape({f32_shape, bf16_shape}), HloOpcode::kTuple,
{convert, add}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get(), BF16));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kTuple);
EXPECT_EQ(computation->root_instruction()->operand(0)->shape().element_type(),
F32);
EXPECT_EQ(
computation->root_instruction()->shape().tuple_shapes(0).element_type(),
F32);
}
} |
1,860 | cpp | tensorflow/tensorflow | hlo_phi_graph | third_party/xla/xla/service/hlo_phi_graph.cc | third_party/xla/xla/service/hlo_phi_graph_test.cc | #ifndef XLA_SERVICE_HLO_PHI_GRAPH_H_
#define XLA_SERVICE_HLO_PHI_GRAPH_H_
#include <iterator>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_value.h"
namespace xla {
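// Graph of phi (merge) values over HloValues; used to detect and collapse
// redundant phis so that equivalent values share a single optimized id.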
class PhiGraph {
public:
void RegisterPhi(const HloValue& value,
absl::Span<const HloValue* const> inputs);
HloValue::Id GetOptimizedId(const HloValue& value);
bool InputsEqualTo(const HloValue& value,
absl::Span<const HloValue* const> inputs);
HloValue::Id FindOptimizedValue(const HloValue::Id id);
void Optimize();
std::string ToString();
private:
struct Node {
bool is_phi;
std::vector<Node*> users;
std::vector<Node*> operands;
HloValue::Id value_id;
bool mark_as_dead = false;
};
Node* CreateOrReuseNode(const HloValue& value);
void ReplaceNodeWith(Node* node, Node* replace);
absl::flat_hash_map<Node*, std::vector<HloValue::Id>> node_to_value_id_;
absl::flat_hash_map<HloValue::Id, Node*> value_id_to_node_;
std::vector<std::unique_ptr<Node>> node_storage_;
};
}
#endif
#include "xla/service/hlo_phi_graph.h"
#include <queue>
namespace xla {
HloValue::Id PhiGraph::GetOptimizedId(const HloValue& value) {
Node* node = value_id_to_node_[value.id()];
CHECK(!node->mark_as_dead);
return node->value_id;
}
bool PhiGraph::InputsEqualTo(const HloValue& value,
absl::Span<const HloValue* const> inputs) {
auto iter = value_id_to_node_.find(value.id());
CHECK(iter != value_id_to_node_.end());
absl::flat_hash_set<HloValue::Id> existing_set;
for (Node* operand : iter->second->operands) {
existing_set.insert(operand->value_id);
}
absl::flat_hash_set<HloValue::Id> new_set;
for (const HloValue* input : inputs) {
new_set.insert(input->id());
}
return existing_set == new_set;
}
HloValue::Id PhiGraph::FindOptimizedValue(const HloValue::Id id) {
auto iter = value_id_to_node_.find(id);
CHECK(iter != value_id_to_node_.end());
CHECK(!iter->second->mark_as_dead);
return iter->second->value_id;
}
PhiGraph::Node* PhiGraph::CreateOrReuseNode(const HloValue& value) {
auto iter = value_id_to_node_.find(value.id());
if (iter == value_id_to_node_.end()) {
node_storage_.emplace_back(std::make_unique<Node>());
Node* node = node_storage_.back().get();
node->value_id = value.id();
value_id_to_node_[value.id()] = node;
node_to_value_id_[node].push_back(value.id());
return node;
} else {
CHECK_NE(iter->second, nullptr);
CHECK_EQ(iter->second->value_id, value.id());
return iter->second;
}
}
void PhiGraph::ReplaceNodeWith(PhiGraph::Node* node, PhiGraph::Node* replace) {
CHECK(node->is_phi);
if (node->mark_as_dead) {
return;
}
if (replace->mark_as_dead) {
auto iter = value_id_to_node_.find(replace->value_id);
CHECK(iter != value_id_to_node_.end());
return ReplaceNodeWith(node, iter->second);
}
CHECK(!replace->mark_as_dead);
for (Node* user : node->users) {
absl::c_replace(user->operands, node, replace);
}
for (Node* operand : node->operands) {
absl::c_replace(operand->users, node, replace);
}
for (HloValue::Id value_id : node_to_value_id_[node]) {
CHECK(value_id_to_node_.contains(value_id));
value_id_to_node_[value_id] = replace;
}
absl::c_copy(node_to_value_id_[node],
std::back_inserter(node_to_value_id_[replace]));
node_to_value_id_[node].clear();
node->mark_as_dead = true;
}
void PhiGraph::RegisterPhi(const HloValue& value,
absl::Span<const HloValue* const> inputs) {
Node* node = CreateOrReuseNode(value);
CHECK(value.is_phi());
node->is_phi = true;
node->operands.clear();
for (auto input : inputs) {
CHECK(input != nullptr);
Node* input_node = CreateOrReuseNode(*input);
node->operands.push_back(input_node);
}
}
std::string PhiGraph::ToString() {
std::string out = "PhiGraph: \n";
for (auto& node : node_storage_) {
absl::StrAppend(&out, node->value_id);
if (node->is_phi) {
absl::StrAppend(&out, ", phi");
}
    if (node->mark_as_dead) {
      absl::StrAppend(&out, ", dead");
    }
    absl::StrAppend(&out, ":\n");
for (Node* input : node->operands) {
absl::StrAppend(&out, " ", input->value_id, "\n");
}
}
return out;
}
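// Runs to a fixed point: strips self-referencing edges, collapses phis whose
// inputs are all identical, and resolves groups of mutually referencing phis
// that are fed by exactly one non-phi value.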
void PhiGraph::Optimize() {
VLOG(2) << "Optimizing phi graph:";
XLA_VLOG_LINES(2, ToString());
for (auto& node : node_storage_) {
for (Node* input : node->operands) {
input->users.push_back(node.get());
}
}
bool changed = true;
while (changed) {
changed = false;
absl::flat_hash_set<Node*> checked_for_closure;
for (auto& node : node_storage_) {
if (!node->is_phi) {
continue;
}
if (node->mark_as_dead) {
continue;
}
Node* node_ptr = node.get();
VLOG(2) << "Optimizing: " << node_ptr->value_id;
CHECK_GE(node_ptr->operands.size(), 1);
auto it = absl::c_find(node_ptr->operands, node_ptr);
while (it != node_ptr->operands.end()) {
node_ptr->operands.erase(it);
it = absl::c_find(node_ptr->operands, node_ptr);
}
it = absl::c_find(node_ptr->users, node_ptr);
while (it != node_ptr->users.end()) {
node_ptr->users.erase(it);
it = absl::c_find(node_ptr->users, node_ptr);
}
CHECK_GE(node_ptr->operands.size(), 1);
bool all_inputs_are_same = absl::c_all_of(
node_ptr->operands,
[&](Node* elem) { return elem == node_ptr->operands[0]; });
if (all_inputs_are_same) {
VLOG(1) << "All inputs to node " << node_ptr->value_id
<< " are the same, replacing it with "
<< node_ptr->operands[0]->value_id;
ReplaceNodeWith(node_ptr, node_ptr->operands[0]);
changed = true;
continue;
}
if (checked_for_closure.contains(node_ptr)) {
continue;
}
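      // Walk the transitive closure of this phi's operands. If the closure is
      // dominated by exactly one non-phi node, every phi in it can be
      // replaced by that node.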
absl::flat_hash_set<Node*> workset;
std::queue<Node*> worklist;
Node* non_phi = nullptr;
worklist.push(node_ptr);
while (!worklist.empty()) {
Node* todo = worklist.front();
worklist.pop();
if (workset.contains(todo)) {
continue;
}
checked_for_closure.insert(todo);
workset.insert(todo);
for (Node* operand : todo->operands) {
worklist.push(operand);
}
if (!todo->is_phi) {
if (non_phi != nullptr && non_phi != todo) {
non_phi = nullptr;
break;
} else {
non_phi = todo;
}
}
}
if (non_phi != nullptr) {
for (Node* node : workset) {
if (!node->is_phi) {
CHECK_EQ(node, non_phi);
continue;
}
VLOG(1) << "Replace node " << node->value_id
<< " in the closure with node " << non_phi->value_id;
ReplaceNodeWith(node, non_phi);
changed = true;
}
}
}
}
}
} | #include "xla/service/hlo_phi_graph.h"
#include "xla/literal_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class PhiGraphTest : public ::testing::Test {
protected:
HloValue NewHloValue(bool is_phi) {
static int64_t id = 0;
return HloValue(id++, dummy_inst_.get(), {}, is_phi);
}
void SetUp() override {
dummy_inst_ = HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f));
}
std::unique_ptr<HloInstruction> dummy_inst_;
};
TEST_F(PhiGraphTest, SelfReferencingPhi) {
PhiGraph phi_graph;
HloValue A = NewHloValue(false);
HloValue B = NewHloValue(true);
phi_graph.RegisterPhi(B, {&A, &B});
phi_graph.Optimize();
EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
TEST_F(PhiGraphTest, PhiWithSameInputs) {
PhiGraph phi_graph;
HloValue A = NewHloValue(false);
HloValue B = NewHloValue(true);
phi_graph.RegisterPhi(B, {&A, &A});
phi_graph.Optimize();
EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
TEST_F(PhiGraphTest, CircularPhi) {
PhiGraph phi_graph;
HloValue A = NewHloValue(true);
HloValue B = NewHloValue(true);
HloValue C = NewHloValue(true);
HloValue D = NewHloValue(false);
phi_graph.RegisterPhi(A, {&B, &C});
phi_graph.RegisterPhi(B, {&D, &C});
phi_graph.RegisterPhi(C, {&A, &B});
phi_graph.Optimize();
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
}
TEST_F(PhiGraphTest, NestedPhiReduction) {
PhiGraph phi_graph;
HloValue A = NewHloValue(true);
HloValue B = NewHloValue(true);
HloValue C = NewHloValue(true);
HloValue D = NewHloValue(false);
HloValue E = NewHloValue(true);
phi_graph.RegisterPhi(A, {&B, &C});
phi_graph.RegisterPhi(B, {&E, &C});
phi_graph.RegisterPhi(C, {&A, &B});
phi_graph.RegisterPhi(E, {&D, &D});
phi_graph.Optimize();
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(E.id()));
}
}
} |
1,861 | cpp | tensorflow/tensorflow | while_loop_fusible_sinking | third_party/xla/xla/service/while_loop_fusible_sinking.cc | third_party/xla/xla/service/while_loop_fusible_sinking_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_FUSIBLE_SINKING_H_
#define XLA_SERVICE_WHILE_LOOP_FUSIBLE_SINKING_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
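// Sinks while-loop-invariant values produced by fusible chains of expanding
// instructions (broadcasts, iotas, scalar constants plus elementwise ops)
// into the while loop body, so the expanded value is recomputed inside the
// loop instead of being materialized and passed in as an invariant operand.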
class WhileLoopFusibleSinking : public HloModulePass {
public:
WhileLoopFusibleSinking() = default;
~WhileLoopFusibleSinking() override = default;
absl::string_view name() const override {
return "while-loop-fusible-sinking";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> TrySinkingFusiblesIntoWhileLoop(
HloInstruction* while_instr);
bool IsSinkableFusion(HloInstruction* while_operand);
HloInstruction* CreateSinkableFusion(HloInstruction* while_operand);
absl::flat_hash_map<HloComputation*, int> call_counts_;
};
}
#endif
#include "xla/service/while_loop_fusible_sinking.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
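// Instructions that cheaply expand little or no input into a larger result:
// broadcasts, rank-0 constants and iotas.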
bool IsPurelyExpanding(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kBroadcast ||
(instr->opcode() == HloOpcode::kConstant &&
instr->shape().rank() == 0) ||
instr->opcode() == HloOpcode::kIota;
}
bool IsFusionCandidate(const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kRng &&
(instr->IsElementwise() || instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose);
}
}
bool WhileLoopFusibleSinking::IsSinkableFusion(HloInstruction* while_operand) {
absl::InlinedVector<HloInstruction*, 8> worklist;
absl::flat_hash_set<int> visited;
worklist.push_back(while_operand);
while (!worklist.empty()) {
HloInstruction* to_process = worklist.back();
worklist.pop_back();
if (!to_process->IsFusible()) {
return false;
}
if (!visited.insert(to_process->unique_id()).second) {
if (visited.size() > 100) {
return false;
}
continue;
}
if (IsPurelyExpanding(to_process)) {
continue;
}
if (IsFusionCandidate(to_process)) {
for (auto* op : to_process->operands()) {
worklist.push_back(op);
}
continue;
}
return false;
}
return true;
}
HloInstruction* WhileLoopFusibleSinking::CreateSinkableFusion(
HloInstruction* while_operand) {
HloInstruction* fusion =
while_operand->AddInstruction(while_operand->CreateFusion(
while_operand->shape(), HloInstruction::FusionKind::kLoop,
while_operand));
bool did_fuse = IsFusionCandidate(while_operand);
while (did_fuse) {
did_fuse = false;
for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
HloInstruction* op = fusion->mutable_operand(i);
if (IsPurelyExpanding(op)) {
continue;
}
fusion->FuseInstruction(op);
did_fuse = true;
break;
}
}
did_fuse = true;
while (did_fuse) {
did_fuse = false;
for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
HloInstruction* op = fusion->mutable_operand(i);
if (IsPurelyExpanding(op)) {
fusion->FuseInstruction(op);
did_fuse = true;
break;
}
}
}
return fusion;
}
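// Rewrites a single while loop: for every loop-invariant operand that forms a
// sinkable fusion, the fusion's leaf operands are appended to the loop state
// and the fusion itself is recomputed (defused) inside the loop body.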
absl::StatusOr<bool> WhileLoopFusibleSinking::TrySinkingFusiblesIntoWhileLoop(
HloInstruction* while_instr) {
HloComputation* while_cond = while_instr->while_condition();
HloComputation* while_body = while_instr->while_body();
if (call_counts_[while_body] > 1 || call_counts_[while_cond] > 1) {
return false;
}
HloInstruction* init_value = while_instr->mutable_operand(0);
if (init_value->opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
conditional_gte_index_to_insts =
WhileUtil::GetGTEsMapForWhileConditional(*while_cond);
std::vector<HloInstruction*> invariant_body_gtes =
WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
std::vector<int64_t> tuple_indices;
std::vector<HloInstruction*> new_operands;
for (HloInstruction* invariant_body_gte : invariant_body_gtes) {
int64_t index = invariant_body_gte->tuple_index();
if (while_instr->operand_count() == 0 || init_value->operand_count() == 0) {
CHECK_EQ(while_instr->user_count(), 0);
VLOG(3) << "Each element in the operand tuple of the while instruction '"
<< while_instr->name()
<< "' was an invariant value, whose usage has been replaced "
" directly by the value.";
break;
}
HloInstruction* invariant_value = init_value->mutable_operand(index);
if (absl::c_any_of(invariant_body_gte->users(),
[](const HloInstruction* use) {
switch (use->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kSlice:
return true;
default:
return false;
}
})) {
continue;
}
if (init_value->IsRoot() || init_value->user_count() > 1) {
init_value = init_value->AddInstruction(init_value->Clone());
TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(0, init_value));
}
if (!IsSinkableFusion(invariant_value)) {
continue;
}
HloInstruction* fusion = CreateSinkableFusion(invariant_value);
changed = true;
if (fusion->operand_count() > 0 &&
(while_instr->IsRoot() ||
absl::c_any_of(while_instr->users(), [&](HloInstruction* use) {
return use->opcode() != HloOpcode::kGetTupleElement;
}))) {
auto uses = while_instr->users();
std::vector<HloInstruction*> gtes(init_value->operand_count());
for (int64_t i = 0; i < gtes.size(); ++i) {
gtes[i] = while_instr->AddInstruction(
HloInstruction::CreateGetTupleElement(while_instr, i));
}
HloInstruction* tuple =
while_instr->AddInstruction(HloInstruction::CreateTuple(gtes));
if (while_instr->IsRoot()) {
while_instr->parent()->set_root_instruction(tuple);
}
if (!uses.empty()) {
TF_RETURN_IF_ERROR(while_instr->ReplaceUsesWith(uses, tuple));
}
}
absl::InlinedVector<HloInstruction*, 2> invariant_output_uses;
for (auto use : while_instr->users()) {
if (use->opcode() == HloOpcode::kGetTupleElement &&
use->tuple_index() == index) {
invariant_output_uses.push_back(use);
}
}
for (auto use : invariant_output_uses) {
TF_RETURN_IF_ERROR(
while_instr->parent()->ReplaceInstruction(use, invariant_value));
}
HloInstruction* root = while_body->root_instruction();
HloInstruction* parameter = while_body->parameter_instruction(0);
tuple_indices.resize(fusion->operand_count());
int64_t next_index = init_value->operand_count();
new_operands.resize(fusion->operand_count());
for (int64_t i = 0; i < fusion->operand_count(); ++i) {
init_value->AppendOperand(fusion->mutable_operand(i));
parameter->mutable_shape()->mutable_tuple_shapes()->push_back(
fusion->mutable_operand(i)->shape());
new_operands[i] = root->AddInstruction(
HloInstruction::CreateGetTupleElement(parameter, next_index++));
root->AppendOperand(new_operands[i]);
}
*(init_value->mutable_shape()) = parameter->shape();
*(while_instr->mutable_shape()) = parameter->shape();
*(while_cond->parameter_instruction(0)->mutable_shape()) =
parameter->shape();
*(root->mutable_shape()) = parameter->shape();
auto cloned_fusion = while_body->AddInstruction(
fusion->CloneWithNewOperands(fusion->shape(), new_operands));
TF_RETURN_IF_ERROR(fusion->parent()->RemoveInstruction(fusion));
TF_RETURN_IF_ERROR(
while_body->ReplaceInstruction(invariant_body_gte, cloned_fusion));
TF_RETURN_IF_ERROR(cloned_fusion->Defuse());
}
return changed;
}
absl::StatusOr<bool> WhileLoopFusibleSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
call_counts_.clear();
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
for (HloInstruction* while_instr : while_instrs) {
call_counts_[while_instr->while_body()]++;
call_counts_[while_instr->while_condition()]++;
}
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(bool result,
TrySinkingFusiblesIntoWhileLoop(while_instr));
changed |= result;
}
return changed;
}
} | #include "xla/service/while_loop_fusible_sinking.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using WhileLoopFusibleSinkingTest = HloTestBase;
TEST_F(WhileLoopFusibleSinkingTest, SinkOneFusible) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] parameter(0)
const_1 = f32[2] iota(), iota_dimension=0
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Iota()), _));
}
TEST_F(WhileLoopFusibleSinkingTest, SinkMask) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Multiply(op::Add(op::Iota(), op::Iota()),
op::Broadcast())),
_, _));
}
TEST_F(WhileLoopFusibleSinkingTest, NoSinkSlicedMask) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
z = s32[] constant(0)
j = s32[] constant(3)
ds = f32[1,7] dynamic-slice(p_body.1, j, z), dynamic_slice_sizes={1,7}
r = f32[7] reshape(ds)
b = f32[5,7] broadcast(r), dimensions={1}
a = add(b, p_body.0)
add.0 = add(a, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
EXPECT_FALSE(changed);
}
}
} |
1,862 | cpp | tensorflow/tensorflow | dynamic_dimension_inference | third_party/xla/xla/service/dynamic_dimension_inference.cc | third_party/xla/xla/service/dynamic_dimension_inference_test.cc | #ifndef XLA_SERVICE_DYNAMIC_DIMENSION_INFERENCE_H_
#define XLA_SERVICE_DYNAMIC_DIMENSION_INFERENCE_H_
#include <cstdint>
#include <functional>
#include <map>
#include <set>
#include <string>
#include <tuple>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
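// Each op declares one of three levels of support for dynamic lowering: no
// support (inputs must be rewritten to static bounds), optional support
// (either static or dynamic inputs are accepted), or required support (inputs
// must be kept in dynamic form).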
enum OpDynamismSupport : uint8_t {
kNoSupport = 0,
kOptional,
kRequired,
};
using OpSupportsDynamismHandler =
std::function<OpDynamismSupport(HloInstruction*)>;
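// Analyzes each HLO instruction in a module and, for every dynamic dimension,
// records which instruction produces its runtime size.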
class DynamicDimensionInference {
public:
enum ShapeCheckMode {
kInvalid = 0,
kCompileTime,
kRuntime,
kIgnore,
};
using CustomCallInferenceHandler =
std::function<absl::Status(HloInstruction*, DynamicDimensionInference*)>;
using AssertionGenerator = std::function<void(HloInstruction*)>;
static absl::StatusOr<DynamicDimensionInference> Run(
HloModule* module,
OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr,
CustomCallInferenceHandler custom_call_handler = nullptr,
ShapeCheckMode shape_check_mode = ShapeCheckMode::kIgnore,
const AssertionGenerator& assertion_generator = nullptr,
const absl::flat_hash_set<absl::string_view>& execution_threads_ = {});
std::string ToString() const;
HloInstruction* GetDynamicSize(HloInstruction* inst, const ShapeIndex& index,
int64_t dim) const;
const HloInstruction* GetDynamicSize(const HloInstruction* inst,
const ShapeIndex& index,
int64_t dim) const;
std::vector<HloInstruction*> GetDynamicSizes(HloInstruction* inst,
const ShapeIndex& index) const;
bool HasDynamicDimension(HloInstruction* inst,
ShapeIndexView index = {}) const;
absl::Status ForwardDynamicSize(HloInstruction* inst,
HloInstruction* new_inst,
const ShapeIndex& index);
void SetDynamicSize(HloInstruction* inst, const ShapeIndex& index,
int64_t dim, HloInstruction* size);
void ReplaceAllDynamicDimensionUsesWith(HloInstruction* replace,
HloInstruction* with);
Shape GetDynamicShape(HloInstruction* inst);
bool CanInfer(HloInstruction* hlo);
bool changed() const { return changed_; }
friend class DynamicDimensionInferenceVisitor;
private:
explicit DynamicDimensionInference(
HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,
CustomCallInferenceHandler custom_call_handler,
ShapeCheckMode shape_check_mode, AssertionGenerator assertion_generator,
const absl::flat_hash_set<absl::string_view>& execution_threads_);
struct DynamicDimension {
HloInstruction* inst;
ShapeIndex index;
int64_t dim;
template <typename H>
friend H AbslHashValue(H h, const DynamicDimension& m) {
return H::combine(std::move(h), m.inst, m.index, m.dim);
}
friend bool operator==(const DynamicDimension& lhs,
const DynamicDimension& rhs) {
return lhs.inst == rhs.inst && lhs.index == rhs.index &&
lhs.dim == rhs.dim;
}
std::tuple<int, int, std::string, int64_t> ToTuple() const {
return std::make_tuple(
inst && inst->GetModule() ? inst->GetModule()->unique_id() : -1,
inst ? inst->unique_id() : -1, index.ToString(), dim);
}
friend bool operator<(const DynamicDimension& lhs,
const DynamicDimension& rhs) {
return lhs.ToTuple() < rhs.ToTuple();
}
};
void CopyMapping(HloInstruction* from, HloInstruction* to,
const absl::flat_hash_map<HloInstruction*, HloInstruction*>*
dynamic_size_map = nullptr);
absl::Status AnalyzeDynamicDimensions();
HloModule* module_;
using DynamicMapping = std::map<DynamicDimension, HloInstruction*>;
DynamicMapping dynamic_mapping_;
using PerHloDynamicDimensions =
ConstHloInstructionMap<std::set<DynamicDimension>>;
PerHloDynamicDimensions per_hlo_dynamic_dimensions_;
OpSupportsDynamismHandler op_supports_dynamism_handler_;
CustomCallInferenceHandler custom_call_handler_;
ShapeCheckMode shape_check_mode_;
AssertionGenerator assertion_generator_;
bool changed_ = false;
const absl::flat_hash_set<absl::string_view>& execution_threads_;
};
}
#endif
#include "xla/service/dynamic_dimension_inference.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dynamic_window_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_util.h"
#include "xla/service/while_util.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
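// Returns a computation that accepts the wider parameter tuple `wide_shape`,
// truncates it back to `narrow_comp`'s parameter shape, and inlines the
// original computation; if the shapes already match, `narrow_comp` is
// returned unchanged.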
absl::StatusOr<std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenComputation(HloComputation* narrow_comp, const Shape& wide_shape) {
TF_RET_CHECK(wide_shape.IsTuple());
const Shape& narrow_shape = narrow_comp->parameter_instruction(0)->shape();
if (Shape::Equal()(wide_shape, narrow_shape)) {
return std::make_pair(narrow_comp, CallInliner::InlinedInstructionMap());
}
HloComputation* wide_comp = [&]() {
HloComputation::Builder builder(absl::StrCat("wide.", narrow_comp->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.", narrow_comp->parameter_instruction(0)->name())));
return narrow_comp->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* wide_parameter = wide_comp->parameter_instruction(0);
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_parameter, narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
narrow_comp->parameter_instruction(0)->name()));
HloInstruction* call_narrow_comp = wide_comp->AddInstruction(
HloInstruction::CreateCall(narrow_comp->root_instruction()->shape(),
{truncated_parameter}, narrow_comp));
wide_comp->set_root_instruction(call_narrow_comp,
true);
TF_ASSIGN_OR_RETURN(auto inline_map, CallInliner::Inline(call_narrow_comp));
return std::make_pair(wide_comp, std::move(inline_map));
}
}
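// Per-computation DFS rewrite visitor that propagates dynamic dimension sizes
// from operands to users, with one handler per HLO opcode.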
class DynamicDimensionInferenceVisitor : public DfsHloRewriteVisitor {
public:
explicit DynamicDimensionInferenceVisitor(
const DynamicParameterBinding& param_bindings,
HloDataflowAnalysis& dataflow_analysis, DynamicDimensionInference* parent,
DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler,
DynamicDimensionInference::ShapeCheckMode shape_check_mode,
DynamicDimensionInference::AssertionGenerator assertion_generator)
: param_bindings_(param_bindings),
dataflow_analysis_(dataflow_analysis),
parent_(parent),
custom_call_handler_(std::move(custom_call_handler)),
shape_check_mode_(shape_check_mode),
assertion_generator_(assertion_generator) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
static absl::StatusOr<bool> Run(
HloComputation* computation, HloDataflowAnalysis& dataflow_analysis,
const DynamicParameterBinding& param_bindings,
DynamicDimensionInference* parent,
DynamicDimensionInference::CustomCallInferenceHandler
custom_call_handler = nullptr,
DynamicDimensionInference::ShapeCheckMode shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore,
const DynamicDimensionInference::AssertionGenerator& assertion_generator =
nullptr) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
parent->execution_threads_)) {
return false;
}
DynamicDimensionInferenceVisitor visitor(
param_bindings, dataflow_analysis, parent,
std::move(custom_call_handler), shape_check_mode, assertion_generator);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
if (visitor.shape_assertion_ != nullptr) {
CHECK(assertion_generator);
assertion_generator(visitor.shape_assertion_);
}
return visitor.changed();
}
absl::Status HandleParameter(HloInstruction* hlo) override;
absl::Status HandleInfeed(HloInstruction* hlo) override;
absl::Status HandleConstant(HloInstruction* hlo) override;
absl::Status HandleReduce(HloInstruction* hlo) override;
absl::Status HandleDot(HloInstruction* hlo) override;
absl::Status HandleTuple(HloInstruction* hlo) override;
absl::Status HandleTranspose(HloInstruction* hlo) override;
absl::Status HandleDynamicReshape(HloInstruction* hlo) override;
absl::Status HandleReshape(HloInstruction* hlo) override;
absl::Status HandleSort(HloInstruction* hlo) override;
absl::Status HandlePad(HloInstruction* hlo) override;
absl::Status HandleCustomCall(HloInstruction* hlo) override;
absl::Status HandleBroadcast(HloInstruction* hlo) override;
absl::Status HandleGetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSelect(HloInstruction* hlo) override;
absl::Status HandleConvolution(HloInstruction* hlo) override;
absl::Status HandleConcatenate(HloInstruction* hlo) override;
absl::Status HandleReduceWindow(HloInstruction* hlo) override;
absl::Status HandleReverse(HloInstruction* hlo) override;
absl::Status HandleSelectAndScatter(HloInstruction* hlo) override;
absl::Status HandleGetTupleElement(HloInstruction* hlo) override;
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override;
absl::Status HandleElementwiseNary(HloInstruction* hlo);
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override;
absl::Status HandleClamp(HloInstruction* hlo) override;
absl::Status HandleConditional(HloInstruction* hlo) override;
absl::Status HandleWhile(HloInstruction* hlo) override;
absl::Status HandleSlice(HloInstruction* hlo) override;
absl::Status HandleDynamicSlice(HloInstruction* hlo) override;
absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override;
absl::Status HandleGather(HloInstruction* hlo) override;
absl::Status HandleScatter(HloInstruction* hlo) override;
absl::Status HandleMap(HloInstruction* hlo) override;
absl::Status HandleDomain(HloInstruction* hlo) override;
absl::Status HandleAsyncStart(HloInstruction* hlo) override;
absl::Status HandleAsyncDone(HloInstruction* hlo) override;
private:
using OperandDynamicDimensionFn = absl::FunctionRef<absl::Status(
HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size)>;
using DynamicDimensionFn = std::function<absl::Status(
ShapeIndex index, int64_t dimension, HloInstruction* dynamic_size)>;
void SetDynamicSize(HloInstruction* inst, const ShapeIndex& index,
int64_t dim, HloInstruction* size,
bool clear_dynamic_dimension = true);
void SetDynamicSizes(HloInstruction* inst, const ShapeIndex& index,
absl::Span<HloInstruction* const> sizes);
absl::Status HandleDynamicConvolutionForward(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension,
HloInstruction* dynamic_size);
absl::Status HandleDynamicConvolutionKernelGrad(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension);
absl::Status HandleDynamicConvolutionInputGrad(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension);
absl::Status HandleDynamicWindowSamePadding(HloInstruction* hlo,
HloInstruction* dynamic_size,
int64_t operand_index,
int64_t dimension);
absl::Status ForEachOperandDynamicDimension(HloInstruction* inst,
OperandDynamicDimensionFn);
absl::Status ForEachDynamicDimensionInOperand(HloInstruction* inst,
int64_t operand_index,
OperandDynamicDimensionFn);
absl::Status ForEachDynamicDimension(HloInstruction* inst,
const DynamicDimensionFn& fn);
bool CanInfer(HloInstruction* hlo) { return parent_->CanInfer(hlo); }
absl::StatusOr<bool> RequiresPadToStatic(HloInstruction* instr,
ShapeIndex shape_index);
absl::Status InsertPadToStaticOnInstruction(HloInstruction* inst);
absl::Status InsertShapeCheck(HloInstruction* dim1, HloInstruction* dim2,
bool support_implicit_broadcast);
absl::Status PassThroughDynamicDimension(HloInstruction*);
const DynamicParameterBinding& param_bindings_;
HloDataflowAnalysis& dataflow_analysis_;
DynamicDimensionInference* parent_;
DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler_;
DynamicDimensionInference::ShapeCheckMode shape_check_mode_;
HloInstruction* shape_assertion_ = nullptr;
DynamicDimensionInference::AssertionGenerator assertion_generator_;
};
void DynamicDimensionInferenceVisitor::SetDynamicSize(
HloInstruction* inst, const ShapeIndex& index, int64_t dim,
HloInstruction* size, bool clear_dynamic_dimension) {
parent_->SetDynamicSize(inst, index, dim, size);
if (clear_dynamic_dimension) {
ShapeUtil::GetMutableSubshape(inst->mutable_shape(), index)
->set_dynamic_dimension(dim, false);
}
MarkAsChanged();
}
void DynamicDimensionInferenceVisitor::SetDynamicSizes(
HloInstruction* inst, const ShapeIndex& index,
absl::Span<HloInstruction* const> sizes) {
const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);
CHECK(subshape.IsArray() && subshape.rank() == sizes.size());
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (sizes[dimension] != nullptr) {
SetDynamicSize(inst, index, dimension, sizes[dimension]);
}
}
}
absl::Status DynamicDimensionInferenceVisitor::DefaultAction(
HloInstruction* hlo) {
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
return UnimplementedStrCat(
"Asked to propagate a dynamic dimension from hlo ", operand->name(),
"@", index.ToString(), "@", dimension, " to hlo ", hlo->ToString(),
", which is not implemented.");
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleGetTupleElement(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (hlo->tuple_index() != index[0]) {
return absl::OkStatus();
}
ShapeIndex new_index(ShapeIndexView(index).subspan(1));
SetDynamicSize(hlo, new_index, dimension, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleTuple(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction*, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
index.push_front(operand_index);
SetDynamicSize(hlo, index, dimension, dynamic_size);
return absl::OkStatus();
}));
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleBroadcast(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
int64_t broadcast_dim = hlo->dimensions(dimension);
SetDynamicSize(hlo, {}, broadcast_dim, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleConstant(
HloInstruction* hlo) {
if (!hlo->shape().is_dynamic()) {
return absl::OkStatus();
}
auto* constant = Cast<HloConstantInstruction>(hlo);
ShapeTree<bool> do_pad(constant->shape(), false);
Shape padded_shape = constant->shape();
bool pad_any = false;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachMutableSubshapeWithStatus(
&padded_shape,
[&](Shape* subshape, const ShapeIndex& index) -> absl::Status {
if (!subshape->IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool requires_pad, RequiresPadToStatic(hlo, index));
if (requires_pad) {
pad_any = *do_pad.mutable_element(index) = true;
*subshape = ShapeUtil::MakeStaticShape(*subshape);
}
return absl::OkStatus();
}));
if (!pad_any) {
return absl::OkStatus();
}
Literal padded_literal(padded_shape);
do_pad.ForEachElement([&](const ShapeIndex& index, bool requires_pad) {
const Shape& subshape = ShapeUtil::GetSubshape(padded_shape, index);
if (!subshape.IsArray()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(padded_literal.CopyFrom(constant->literal(), index,
index,
true));
if (!requires_pad) {
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (subshape.is_dynamic_dimension(dimension)) {
padded_literal.SetDynamicSize(
dimension, index,
constant->literal().GetDynamicSize(dimension, index));
}
}
}
return absl::OkStatus();
});
auto* padded_constant = hlo->AddInstruction(
HloInstruction::CreateConstant(std::move(padded_literal)));
TF_RETURN_IF_ERROR(constant->ReplaceAllUsesWith(padded_constant));
SetVisited(*padded_constant);
TF_RETURN_IF_ERROR(do_pad.ForEachElementWithStatus(
[&](const ShapeIndex& index, bool requires_pad) -> absl::Status {
if (!requires_pad) {
return absl::OkStatus();
}
const Shape& subshape =
ShapeUtil::GetSubshape(constant->shape(), index);
TF_RET_CHECK(subshape.IsArray());
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (!subshape.is_dynamic_dimension(dimension)) {
continue;
}
HloInstruction* dynamic_size = hlo->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
constant->literal().GetDynamicSize(dimension, index))));
SetVisited(*dynamic_size);
SetDynamicSize(padded_constant, index, dimension, dynamic_size);
}
return absl::OkStatus();
}));
MarkAsChanged();
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleCustomCall(
HloInstruction* hlo) {
if (hlo->custom_call_target() == "PadToStatic") {
for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
if (hlo->operand(0)->shape().is_dynamic_dimension(i)) {
HloInstruction* dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeScalarShape(S32), hlo, i + 1));
ShapeIndex data_output = {0};
SetDynamicSize(hlo, data_output, i, dynamic_size);
}
}
return absl::OkStatus();
}
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
if (custom_call_handler_) {
TF_RETURN_IF_ERROR(custom_call_handler_(hlo, parent_));
} else {
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index,
HloInstruction* dynamic_size) -> absl::Status {
if (hlo->custom_call_target() == "SliceToDynamic" ||
hlo->custom_call_target() == "Sharding" ||
(absl::StartsWith(hlo->custom_call_target(), "Resize") &&
(dimension == 0 || dimension == 3))) {
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
}
if (hlo->custom_call_target() == "DynamicReduceWindowSamePadding") {
if (hlo->operand_count() > 2) {
return Unimplemented(
"DynamicReduceWindowSamePadding doesn't support variadic "
"reduce window %s",
hlo->ToString());
}
return HandleDynamicWindowSamePadding(hlo, dynamic_size,
operand_index, dimension);
}
if (hlo->custom_call_target() ==
"DynamicSelectAndScatterSamePadding") {
if (operand_index == 1) {
return absl::OkStatus();
}
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
}
if (hlo->custom_call_target() == "DynamicConvolutionInputGrad") {
return HandleDynamicConvolutionInputGrad(hlo, operand_index,
dimension);
}
if (hlo->custom_call_target() == "DynamicConvolutionKernelGrad") {
return HandleDynamicConvolutionKernelGrad(hlo, operand_index,
dimension);
}
if (hlo->custom_call_target() == "DynamicConvolutionForward") {
return HandleDynamicConvolutionForward(hlo, operand_index,
dimension, dynamic_size);
}
return Unimplemented(
"CustomCall \"%s\" is not supported to have a dynamic dimension",
hlo->custom_call_target());
}));
}
return InsertPadToStaticOnInstruction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleSort(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dynamic_dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
if (sort->values_count() == 0) {
SetDynamicSize(hlo, {}, dynamic_dimension, dynamic_size);
} else {
SetDynamicSize(hlo, {operand_index}, dynamic_dimension, dynamic_size);
}
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandlePad(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (operand_index != 0) {
return Unimplemented(
"Dynamic dimension on padding value is not supported");
}
const PaddingConfig_PaddingConfigDimension& padding_config =
hlo->padding_config().dimensions(dimension);
HloInstruction* dynamic_size_adjusted = dynamic_size;
if (padding_config.interior_padding() != 0) { | #include "xla/service/dynamic_dimension_inference.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class DynamicDimensionInferenceTest : public HloTestBase {
protected:
DynamicDimensionInferenceTest() : HloTestBase() {
module_ = CreateNewVerifiedModule();
}
absl::Status RunInference(
OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr,
DynamicDimensionInference::CustomCallInferenceHandler handler = nullptr,
DynamicDimensionInference::ShapeCheckMode shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore,
const DynamicDimensionInference::AssertionGenerator& assertion_generator =
nullptr) {
TF_ASSIGN_OR_RETURN(DynamicDimensionInference inference,
DynamicDimensionInference::Run(
module_.get(), op_supports_dynamism_handler,
handler, shape_check_mode, assertion_generator));
inference_ = std::make_unique<DynamicDimensionInference>(inference);
return absl::OkStatus();
}
HloComputation* GetAdd() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation* GetAddTuple() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto lhs_1 =
embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "lhs.1"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {}), "rhs"));
auto rhs_1 =
embedded_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {}), "rhs.1"));
auto add = embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
auto add_1 = embedded_builder.AddInstruction(HloInstruction::CreateBinary(
lhs->shape(), HloOpcode::kAdd, lhs_1, rhs_1));
embedded_builder.AddInstruction(HloInstruction::CreateTuple({add, add_1}));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation* GetGe() {
auto embedded_builder = HloComputation::Builder("ge");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), lhs, rhs, ComparisonDirection::kGe));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<DynamicDimensionInference> inference_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});
};
TEST_F(DynamicDimensionInferenceTest, ParamTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "param"));
auto param2 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param"));
auto result = builder.AddInstruction(
HloInstruction::CreateSetDimensionSize(dynamic_shape, param, param2, 1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(result, {}, 1), param2);
EXPECT_EQ(inference_->GetDynamicSize(param, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(param2, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ElementwiseTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
auto* negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(negate, {}, 1), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ReduceTestI) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {2}, {true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
reduce_shape, negate, init, {0, 2}, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ReduceTestII) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 2));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(
HloInstruction::CreateReduce(reduce_shape, negate, init, {1}, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, VariadicReduce) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param_1 = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto data_param_2 = builder.AddInstruction(
HloInstruction::CreateParameter(1, input_shape, "data_param.2"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape_, "size_param"));
auto data_param_dynamic_1 =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param_1, size_param, 2));
auto data_param_dynamic_2 =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param_2, size_param, 2));
auto dynamic_negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param_dynamic_1));
auto dynamic_negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param_dynamic_2));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeTupleShape({reduce_shape, reduce_shape}),
{dynamic_negate_1, dynamic_negate_2}, {init, init}, {1}, GetAddTuple()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto xy_dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});
auto yz_dynamic_shape =
ShapeUtil::MakeShape(F32, {ydim, zdim}, {true, false});
auto xz_dynamic_shape =
ShapeUtil::MakeShape(F32, {xdim, zdim}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,
size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_dynamic_shape, a_param, size_param, 1));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
yz_dynamic_shape, b_param, size_param, 0));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(xz_dynamic_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTestBatch) {
auto builder = HloComputation::Builder(TestName());
auto lhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});
auto rhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});
auto output_shape =
ShapeUtil::MakeShape(F32, {4, 2, 128, 128}, {true, false, false, false});
auto lhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {4, 128, 2, 8}, {true, false, false, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, lhs_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, rhs_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
lhs_shape_dynamic, a_param, size_param, 0));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(3);
dot_dnums.add_rhs_contracting_dimensions(3);
dot_dnums.add_lhs_batch_dimensions(0);
dot_dnums.add_lhs_batch_dimensions(2);
dot_dnums.add_rhs_batch_dimensions(0);
dot_dnums.add_rhs_batch_dimensions(2);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 3), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTestMultiContracting) {
auto builder = HloComputation::Builder(TestName());
auto lhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 8, 64});
auto rhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 512});
auto output_shape = ShapeUtil::MakeShape(F32, {8, 64, 512});
auto lhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 8, 64}, {true, true, false, false});
auto rhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 512}, {true, true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, lhs_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, rhs_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, lhs_shape.dimensions(),
{true, false, false, false}),
a_param, size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
lhs_shape_dynamic, a_param, size_param, 1));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, rhs_shape.dimensions(), {true, false, false}),
b_param, size_param, 0));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
rhs_shape_dynamic, b_param, size_param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(1);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ConvolutionTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto xy_shape_dynamic = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});
auto zx_shape_dynamic =
ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,
size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_shape_dynamic, a_param, size_param, 1));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
dnums.set_kernel_input_feature_dimension(0);
dnums.set_kernel_output_feature_dimension(1);
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(1);
dnums.set_output_feature_dimension(0);
Window window;
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      zx_shape_dynamic, a_param, b_param, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, TransposeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
auto output_shape = ShapeUtil::MakeShape(F32, {3, 2, 1}, {true, true, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
size_param_1, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
size_param_2, 1));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param_3, 2));
auto* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(output_shape, a_param, {2, 1, 0}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_2);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_1);
}
TEST_F(DynamicDimensionInferenceTest, NonDescendingTransposeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
auto output_shape = ShapeUtil::MakeShape(F32, {3, 1, 2}, {true, true, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
size_param_1, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
size_param_2, 1));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param_3, 2));
auto* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(output_shape, a_param, {2, 0, 1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_1);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_2);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6});
auto output_shape = ShapeUtil::MakeShape(
F32, {6, 4, 1, 5, 2, 3}, {false, true, false, true, false, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, false, false}),
a_param, size_param, 2));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 3));
auto* reshape = builder.AddInstruction(
HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 2), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 3), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 4), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 5), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeInferredDimensionTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});
auto output_shape =
ShapeUtil::MakeShape(F32, {1, 4, 5}, {true, false, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* reshape = builder.AddInstruction(HloInstruction::CreateReshape(
      output_shape, a_param, /*inferred_dimension=*/0));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeTestMajorDimension) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {32, 10, 4});
auto output_shape = ShapeUtil::MakeShape(F32, {320, 4}, {true, false});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {32, 10, 4}, {true, false, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* reshape = builder.AddInstruction(
HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
absl::Status status = RunInference();
EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeIntoScalar) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1});
auto output_shape = ShapeUtil::MakeShape(F32, {});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1}, {true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
builder.AddInstruction(HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_CHECK_OK(RunInference());
}
TEST_F(DynamicDimensionInferenceTest, GatherTest) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[20,10]{1,0} parameter(0)
indices = s32[32,20] parameter(1)
dynamic_size = s32[] parameter(2)
indices_dynamic = s32[<=32,20] set-dimension-size(indices, dynamic_size), dimensions={0}
ROOT gather = s32[<=32,20,10]{2,1,0} gather(%operand, %indices_dynamic),
offset_dims={2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,10}
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text));
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(
module_->entry_computation()->root_instruction(), {}, 0),
module_->entry_computation()->parameter_instruction(2));
}
TEST_F(DynamicDimensionInferenceTest, BroadcastTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2});
auto output_shape =
ShapeUtil::MakeShape(F32, {3, 2, 4}, {false, true, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2}, {true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(output_shape, a_param, {1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 2), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, WhileTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
auto tuple_shape = ShapeUtil::MakeTupleShape({input_shape, input_shape});
auto dynamic_tuple_shape =
ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param"));
auto gte_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 0));
auto gte_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));
body_builder.AddInstruction(HloInstruction::CreateTuple({add, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, tuple_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* a_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));
a_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_0, size_param, 0));
auto* a_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));
a_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_1, size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateTuple({a_0, a_1}));
builder.AddInstruction(HloInstruction::CreateWhile(dynamic_tuple_shape,
condition, body, a_param));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference()); |
1,863 | cpp | tensorflow/tensorflow | dot_dimension_merger | third_party/xla/xla/service/dot_dimension_merger.cc | third_party/xla/xla/service/dot_dimension_merger_test.cc | #ifndef XLA_SERVICE_DOT_DIMENSION_MERGER_H_
#define XLA_SERVICE_DOT_DIMENSION_MERGER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
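// Pass that merges consecutive batch dimensions of dot() instructions into a
// single dimension, wrapping the operands and the result in reshapes so the
// observable shapes are unchanged.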
class DotDimensionMerger : public HloModulePass {
public:
absl::string_view name() const override { return "dot_dimension_merger"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/dot_dimension_merger.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/layout_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
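// Returns `dimensions` with every index >= `start` decreased by `shift`;
// indices smaller than `start` are kept unchanged.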
std::vector<int64_t> ShiftDimensions(absl::Span<const int64_t> dimensions,
const int64_t start, const int64_t shift) {
std::vector<int64_t> new_dimensions;
new_dimensions.reserve(dimensions.size());
for (const int64_t i : dimensions) {
if (i < start) {
new_dimensions.push_back(i);
} else {
new_dimensions.push_back(i - shift);
}
}
return new_dimensions;
}
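// Rewrites dots whose batch dimensions are sorted, logically consecutive and
// physically consecutive in the layout into dots with a single merged batch
// dimension, restoring the original shapes with reshapes.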
class BatchDimensionMerger : public DfsHloRewriteVisitor {
public:
absl::Status HandleDot(HloInstruction* dot) override {
const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
const Shape& lhs_shape = dot->operand(0)->shape();
const Shape& rhs_shape = dot->operand(1)->shape();
CHECK_EQ(dnums.lhs_batch_dimensions_size(),
dnums.rhs_batch_dimensions_size());
const int64_t batch_dimension_count = dnums.lhs_batch_dimensions_size();
if (batch_dimension_count < 2 ||
!DistinctNumbersAreConsecutiveIfSorted(dnums.lhs_batch_dimensions()) ||
!DistinctNumbersAreConsecutiveIfSorted(dnums.rhs_batch_dimensions()) ||
!absl::c_is_sorted(dnums.lhs_batch_dimensions()) ||
!absl::c_is_sorted(dnums.rhs_batch_dimensions()) ||
!LayoutUtil::AreDimensionsConsecutive(lhs_shape.layout(),
dnums.lhs_batch_dimensions()) ||
!LayoutUtil::AreDimensionsConsecutive(rhs_shape.layout(),
dnums.rhs_batch_dimensions())) {
return absl::OkStatus();
}
const int64_t lhs_batch_dimension =
*absl::c_min_element(dnums.lhs_batch_dimensions());
const int64_t rhs_batch_dimension =
*absl::c_min_element(dnums.rhs_batch_dimensions());
int64_t batch_size = 1;
for (const int64_t dimension_number : dnums.lhs_batch_dimensions()) {
batch_size *= lhs_shape.dimensions(dimension_number);
}
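    // Collapses all batch dimensions of `old_shape` into the single dimension
    // at `batch_dim`, whose extent becomes the product of the batch extents.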
auto merge_batch_dims = [&](Shape old_shape, int64_t batch_dim) {
Shape new_shape = old_shape;
for (int64_t i = 1; i < batch_dimension_count; ++i) {
new_shape.DeleteDimension(batch_dim + 1);
}
new_shape.set_dimensions(batch_dim, batch_size);
return new_shape;
};
Shape new_lhs_shape = merge_batch_dims(lhs_shape, lhs_batch_dimension);
Shape new_rhs_shape = merge_batch_dims(rhs_shape, rhs_batch_dimension);
DotDimensionNumbers new_dot_dimension_numbers;
new_dot_dimension_numbers.add_lhs_batch_dimensions(lhs_batch_dimension);
new_dot_dimension_numbers.add_rhs_batch_dimensions(rhs_batch_dimension);
{
const std::vector<int64_t> shifted_contracting_dimensions =
ShiftDimensions(dnums.lhs_contracting_dimensions(),
lhs_batch_dimension, batch_dimension_count - 1);
new_dot_dimension_numbers.mutable_lhs_contracting_dimensions()->Assign(
shifted_contracting_dimensions.begin(),
shifted_contracting_dimensions.end());
}
{
const std::vector<int64_t> shifted_contracting_dimensions =
ShiftDimensions(dnums.rhs_contracting_dimensions(),
rhs_batch_dimension, batch_dimension_count - 1);
new_dot_dimension_numbers.mutable_rhs_contracting_dimensions()->Assign(
shifted_contracting_dimensions.begin(),
shifted_contracting_dimensions.end());
}
auto sparsity = Cast<HloDotInstruction>(dot)->sparsity();
std::vector<SparsityDescriptor> new_sparsity(sparsity.begin(),
sparsity.end());
std::vector<HloInstruction*> sparse_meta(sparsity.size());
for (int i = 0; i < sparsity.size(); ++i) {
SparsityDescriptor& descriptor = new_sparsity[i];
int64_t sparse_batch_dim =
descriptor.index() == 0 ? lhs_batch_dimension : rhs_batch_dimension;
if (descriptor.dimension() > sparse_batch_dim)
descriptor.set_dimension(descriptor.dimension() -
(batch_dimension_count - 1));
HloInstruction* meta =
dot->mutable_operand(HloDotInstruction::kOperands + i);
Shape new_meta_shape = merge_batch_dims(meta->shape(), sparse_batch_dim);
TF_ASSIGN_OR_RETURN(sparse_meta[i], MakeReshapeHlo(new_meta_shape, meta));
}
TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_lhs,
MakeReshapeHlo(new_lhs_shape, dot->mutable_operand(0)));
TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_rhs,
MakeReshapeHlo(new_rhs_shape, dot->mutable_operand(1)));
Shape new_dot_shape = merge_batch_dims(dot->shape(), 0);
HloInstruction* new_dot = dot->parent()->AddInstruction(
HloInstruction::CreateDot(new_dot_shape, reshaped_lhs, reshaped_rhs,
new_dot_dimension_numbers,
dot->precision_config(), new_sparsity,
sparse_meta),
&dot->metadata());
dot->SetupDerivedInstruction(new_dot);
std::unique_ptr<HloInstruction> out_reshape =
HloInstruction::CreateReshape(dot->shape(), new_dot);
return ReplaceWithNewInstruction(dot, std::move(out_reshape));
}
};
}
absl::StatusOr<bool> DotDimensionMerger::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return BatchDimensionMerger().RunOnModule(module, execution_threads);
}
} | #include "xla/service/dot_dimension_merger.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using DotDimensionMergerTest = HloTestBase;
TEST_F(DotDimensionMergerTest, MergeConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[79,2,4,12,11] parameter(0)
p1 = bf16[79,2,4,11,44] parameter(1)
ROOT d = bf16[2,4,12,44] dot(p0, p1),
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},
metadata={op_name="testname"}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,2,1,0} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{3,2,1,0} reshape(%p1)
; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,1,0} dot(%[[R0]], %[[R1]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,2,1,0} reshape(%[[DOT]])
; CHECK-SAME: metadata={op_name="testname"}
)");
}
TEST_F(DotDimensionMergerTest,
MergeConsecutiveBatchDimensionsNonDefaultLayouts) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[79,2,4,12,11]{4,0,3,2,1} parameter(0)
p1 = bf16[79,2,4,11,44]{3,0,4,2,1} parameter(1)
ROOT d = bf16[2,4,12,44]{3,1,0,2} dot(p0, p1),
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},
metadata={op_name="testname"}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,0,2,1} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{2,0,3,1} reshape(%p1)
; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,0,1} dot(%[[R0]], %[[R1]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,1,0,2} reshape(%[[DOT]])
; CHECK-SAME: metadata={op_name="testname"}
)");
}
TEST_F(DotDimensionMergerTest, SkipPhysicallyNonConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[2,4,12,13]{3,1,2,0} parameter(0)
p1 = bf16[2,4,13,55]{3,2,1,0} parameter(1)
ROOT d = bf16[2,4,12,55]{3,2,1,0} dot(p0, p1),
lhs_batch_dims={0,1}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SkipUnsortedBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[4,2,12,13] parameter(0)
p1 = bf16[2,4,13,55] parameter(1)
ROOT d = bf16[2,4,12,55] dot(p0, p1),
lhs_batch_dims={1,0}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SkipLogicallyNonConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[2,12,4,13] parameter(0)
p1 = bf16[2,4,13,55] parameter(1)
ROOT d = bf16[2,4,12,55] dot(p0, p1),
lhs_batch_dims={0,2}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SparseDotUpdatesDescriptor) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[3,4,5,6,16] parameter(0)
p1 = bf16[3,4,5,32,6] parameter(1)
meta = u16[3,4,5,6,2] parameter(2)
ROOT d = bf16[4,5,6,6] dot(p0, p1, meta), sparsity=L.4@2:4,
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[3,20,6,16]{3,2,1,0} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[3,20,32,6]{3,2,1,0} reshape(%p1)
; CHECK: %[[R2:.*]] = u16[3,20,6,2]{3,2,1,0} reshape(%meta)
; CHECK: %[[DOT:.*]] = bf16[20,6,6]{2,1,0} dot(%[[R0]], %[[R1]], %[[R2]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-SAME: sparsity=L.3@2:4
; CHECK-NEXT: ROOT {{.+}} = bf16[4,5,6,6]{3,2,1,0} reshape(%[[DOT]])
)");
}
}
} |
1,864 | cpp | tensorflow/tensorflow | all_reduce_combiner | third_party/xla/xla/service/all_reduce_combiner.cc | third_party/xla/xla/service/all_reduce_combiner_test.cc | #ifndef XLA_SERVICE_ALL_REDUCE_COMBINER_H_
#define XLA_SERVICE_ALL_REDUCE_COMBINER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/array2d.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/xla_data.pb.h"
namespace xla {
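// Combines small, independent all-reduce ops into larger tuple-shaped
// all-reduces, bounded by a combined size threshold in bytes and a maximum
// operand count.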
class AllReduceCombiner : public HloModulePass {
public:
AllReduceCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count);
absl::string_view name() const override { return "all-reduce-combiner"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
int64_t combine_threshold_in_bytes_;
int64_t combine_threshold_count_;
};
}
#endif
#include "xla/service/all_reduce_combiner.h"
#include <algorithm>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
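// Replaces the given list of all-reduces with a single tuple-shaped
// all-reduce and rewires each original user to the matching
// get-tuple-element of the combined op.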
absl::Status CombineAllReduces(absl::Span<HloInstruction* const> to_combine) {
if (to_combine.size() < 2) {
return absl::OkStatus();
}
VLOG(1) << "Combined " << to_combine.size() << " CRS ops";
HloComputation& computation = *to_combine.back()->parent();
HloComputation* reduction = to_combine[0]->to_apply();
const HloOpcode type = reduction->root_instruction()->opcode();
std::vector<HloInstruction*> operands;
std::vector<const Shape*> operand_shapes;
VLOG(1) << "Combining set";
for (HloInstruction* hlo : to_combine) {
VLOG(1) << "Set element: " << hlo->ToString();
TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllReduce);
TF_RET_CHECK(hlo->operands().size() == 1);
TF_RET_CHECK(hlo->to_apply() == reduction ||
(hlo->to_apply()->instruction_count() == 3 &&
hlo->to_apply()->num_parameters() == 2 &&
hlo->to_apply()->root_instruction()->opcode() == type));
TF_RET_CHECK(hlo->shape().IsArray());
for (HloInstruction* operand : hlo->operands()) {
operands.push_back(operand);
operand_shapes.push_back(&operand->shape());
}
}
HloInstruction* combined;
TF_RET_CHECK(operands.size() >= 2);
combined = computation.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes), operands, reduction,
to_combine.front()->device_list(),
      /*constrain_layout=*/false, to_combine.front()->channel_id(),
Cast<HloAllReduceInstruction>(to_combine.front())
->use_global_device_ids()));
combined->set_sharding(
hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine));
VLOG(1) << "Replacing with : " << combined->ToString();
for (int64_t i = 0; i < to_combine.size(); ++i) {
auto replace_with = HloInstruction::CreateGetTupleElement(
to_combine[i]->shape(), combined, i);
TF_RETURN_IF_ERROR(computation.ReplaceWithNewInstruction(
to_combine[i], std::move(replace_with)));
}
return absl::OkStatus();
}
}
AllReduceCombiner::AllReduceCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count)
: combine_threshold_in_bytes_(combine_threshold_in_bytes),
combine_threshold_count_(combine_threshold_count) {}
absl::StatusOr<bool> AllReduceCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceCombiner with threshold of "
<< combine_threshold_in_bytes_ << " bytes";
if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
VLOG(1) << "Skip AllReduceCombiner because the threshold is zero";
return false;
}
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1) << "Skip AllReduceCombiner because the module contains all-reduce "
"with constrained layouts";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
auto key_fn =
[&domain_map](
const HloInstruction* instruction) -> std::optional<AllReduceKey> {
if (instruction->opcode() != HloOpcode::kAllReduce) {
return std::nullopt;
}
return GetAllReduceKey(instruction, domain_map.get());
};
TF_ASSIGN_OR_RETURN(
bool computation_changed,
CombineInstructionsByKey<AllReduceKey>(
computation, key_fn, &CombineAllReduces,
combine_threshold_in_bytes_, combine_threshold_count_));
changed |= computation_changed;
}
return changed;
}
} | #include "xla/service/all_reduce_combiner.h"
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using std::nullopt;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
int64_t kMaxCombineCount = 256;
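// Returns the number of all-reduce instructions in the module, ignoring
// fusion computations.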
int64_t AllReduceCount(const HloModule& module) {
int64_t count = 0;
for (HloComputation* computation : module.computations()) {
if (computation->IsFusionComputation()) {
continue;
}
for (HloInstruction* hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kAllReduce) {
++count;
}
}
}
return count;
}
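// Adds one all-reduce per entry of `sizes_in_kib` (each using the matching
// reduction computation), records the broadcast inputs in `inputs`, and
// returns a tuple of all the all-reduces.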
HloInstruction* MakeCrossReplicaReductions(
std::vector<int64_t> sizes_in_kib, std::vector<HloComputation*> reductions,
std::vector<HloInstruction*>* inputs, HloComputation::Builder* b) {
CHECK_EQ(reductions.size(), sizes_in_kib.size());
std::vector<HloInstruction*> all_reduces;
for (int i = 0; i < sizes_in_kib.size(); i++) {
int64_t size_in_kib = sizes_in_kib[i];
HloComputation* reduction = reductions[i];
auto constant = b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
Shape shape = ShapeUtil::MakeShape(
F32, {static_cast<int32_t>(size_in_kib * 1024 / sizeof(float))});
auto input =
b->AddInstruction(HloInstruction::CreateBroadcast(shape, constant, {}));
inputs->push_back(input);
all_reduces.push_back(b->AddInstruction(HloInstruction::CreateAllReduce(
shape, {input}, reduction, CollectiveDeviceList(),
        /*constrain_layout=*/false, /*channel_id=*/nullopt,
        /*use_global_device_ids=*/false)));
}
return b->AddInstruction(HloInstruction::CreateTuple(all_reduces));
}
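// Builds a scalar two-parameter reduction computation of the given opcode
// (e.g. add, minimum, maximum) and embeds it into `module`.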
HloComputation* MakeReduction(const HloOpcode type, HloModule* module) {
HloComputation::Builder sum_builder(HloOpcodeString(type));
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {}), type, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
return reduction;
}
using AllReduceCombinerTest = HloTestBase;
TEST_F(AllReduceCombinerTest, CombineAllReduces) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
auto root = MakeCrossReplicaReductions(
{1, 2, 10, 7, 6}, {sum, sum, sum, sum, sum}, &inputs, &b);
auto computation = module->AddEntryComputation(b.Build());
AllReduceCombiner combine(10 * 1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
ASSERT_EQ(AllReduceCount(*module), 1);
EXPECT_TRUE(changed);
ASSERT_EQ(root, computation->root_instruction());
ASSERT_EQ(inputs.size(), root->operands().size());
HloInstruction* combined = nullptr;
for (int64_t i = 0; i < root->operands().size(); ++i) {
HloInstruction* hlo = root->mutable_operand(i);
ASSERT_TRUE(hlo->opcode() == HloOpcode::kGetTupleElement);
EXPECT_EQ(hlo->tuple_index(), i);
EXPECT_TRUE(ShapeUtil::Equal(inputs[i]->shape(), hlo->shape()));
if (combined == nullptr) {
combined = hlo->mutable_operand(0);
ASSERT_TRUE(combined->opcode() == HloOpcode::kAllReduce);
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), combined->shape()));
ASSERT_EQ(combined->operands().size(), inputs.size());
}
EXPECT_EQ(combined, hlo->operand(0));
EXPECT_TRUE(ShapeUtil::Equal(inputs[i]->shape(), hlo->shape()));
EXPECT_EQ(combined->operand(i), inputs[i]);
EXPECT_EQ(1, inputs[i]->users().size());
}
ASSERT_NE(combined, nullptr);
}
TEST_F(AllReduceCombinerTest, CombineCrossReplicaReductionsInGroups) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation* min = MakeReduction(HloOpcode::kMinimum, module.get());
HloComputation* max = MakeReduction(HloOpcode::kMaximum, module.get());
HloComputation* sum_2 = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
MakeCrossReplicaReductions(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
{sum, sum_2, min, min, min, max, max, max, sum, sum_2}, &inputs, &b);
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(10 * 1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
ASSERT_EQ(AllReduceCount(*module), 3)
<< "expects 3 groups for 3 reduction types.";
EXPECT_TRUE(changed);
}
TEST_F(AllReduceCombinerTest, RespectThreshold) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
MakeCrossReplicaReductions({8, 4}, {sum, sum}, &inputs, &b);
module->AddEntryComputation(b.Build());
{
AllReduceCombiner combine((8 + 4) * 1024 - 1, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), inputs.size());
EXPECT_FALSE(changed);
}
{
AllReduceCombiner combine((8 + 4) * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 1);
EXPECT_TRUE(changed);
}
}
TEST_F(AllReduceCombinerTest, NoDependentCombination) {
auto module = CreateNewVerifiedModule();
HloComputation* reduction = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
auto all_reduce = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList(),
      /*constrain_layout=*/false, /*channel_id=*/nullopt,
      /*use_global_device_ids=*/false));
b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {all_reduce}, reduction,
      CollectiveDeviceList(), /*constrain_layout=*/false,
      /*channel_id=*/nullopt, /*use_global_device_ids=*/false));
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, GroupAllReduce) {
auto module = CreateNewVerifiedModule(TestName(), 4);
HloComputation::Builder b(TestName());
HloComputation* reduction = MakeReduction(HloOpcode::kAdd, module.get());
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
auto crs0 = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList({{0, 1}, {2, 3}}),
      /*constrain_layout=*/false,
      /*channel_id=*/nullopt, /*use_global_device_ids=*/false));
auto crs1 = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList({{0, 2}, {1, 3}}),
      /*constrain_layout=*/false,
      /*channel_id=*/nullopt, /*use_global_device_ids=*/false));
b.AddInstruction(HloInstruction::CreateTuple({crs0, crs1}));
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, DomainPreventsCombining) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
crs0 = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
crs1 = f32[128] all-reduce(param1),
replica_groups={}, to_apply=summit, sharding={maximal device=1}
domain0 = f32[128] domain(crs0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}}, exit={maximal device=0}}
domain1 = f32[128] domain(crs1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}}, exit={maximal device=1}}
ROOT tuple = (f32[128], f32[128]) tuple(domain0, domain1),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
LOG(INFO) << "Original module:\n" << module->ToString();
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, CombineFromTwoDomainsWithSameMetadata) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
param2 = f32[128] parameter(2), sharding={maximal device=1}
crs0 = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
crs1 = f32[128] all-reduce(param1),
replica_groups={}, to_apply=summit, sharding={maximal device=1}
crs2 = f32[128] all-reduce(param2),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
domain0 = f32[128] domain(crs0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
domain1 = f32[128] domain(crs1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=1}}
domain2 = f32[128] domain(crs2),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
ROOT tuple = (f32[128], f32[128], f32[128]) tuple(domain0, domain1, domain2),
sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 3);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_TRUE(changed);
const HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
ASSERT_EQ(param0->user_count(), 1);
const HloInstruction* combined_ar = param0->users().front();
ASSERT_EQ(combined_ar->opcode(), HloOpcode::kAllReduce);
EXPECT_THAT(combined_ar, testing::opcode_matchers::Sharding(
"{{maximal device=0}, {maximal device=0}}"));
}
TEST_F(AllReduceCombinerTest, DoNotCombineCrossShardAndCrossReplicaInSPMD) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
cross_shard_ar = f32[128] all-reduce(param0),
replica_groups={{0}}, to_apply=summit, channel_id=1
cross_replica_ar = f32[128] all-reduce(param1),
replica_groups={{0}}, to_apply=summit, sharding={maximal device=1}
ROOT tuple = (f32[128], f32[128]) tuple(cross_shard_ar, cross_replica_ar)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, CrossCoreAllReduce) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
crs00 = f32[128] all-reduce(param0),
replica_groups={{0}}, channel_id=1, to_apply=summit,
sharding={maximal device=0}
crs01 = f32[128] all-reduce(param1),
replica_groups={{0}}, channel_id=1, to_apply=summit,
sharding={maximal device=1}
crs10 = f32[128] all-reduce(param0),
replica_groups={{0}}, channel_id=2, to_apply=summit,
sharding={maximal device=0}
crs11 = f32[128] all-reduce(param1),
replica_groups={{0}}, channel_id=2, to_apply=summit,
sharding={maximal device=1}
domain0 = f32[128] domain(crs00),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
ROOT add = f32[128] add(domain0, crs11),
sharding={maximal device=1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 4);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::Domain(op::GetTupleElement(AllOf(
op::AllReduce(op::Parameter(0), op::Parameter(0)),
op::Shape("(f32[128], f32[128])")))),
op::GetTupleElement(AllOf(
op::AllReduce(op::Parameter(1), op::Parameter(1)),
op::Shape("(f32[128], f32[128])")))));
}
TEST_F(AllReduceCombinerTest, CrossCombineGroupCycle) {
const char* const hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
%max {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] maximum(lhs, rhs)
}
ENTRY %comp {
p0 = f32[128] parameter(0)
p1 = f32[128] parameter(1)
crs00 = f32[128] all-reduce(p0), to_apply=add
crs10 = f32[128] all-reduce(p1), to_apply=max
crs01 = f32[128] all-reduce(crs00), to_apply=max
crs11 = f32[128] all-reduce(crs10), to_apply=add
add0 = f32[128] add(crs01, crs11)
crs02 = f32[128] all-reduce(add0), to_apply=add
crs12 = f32[128] all-reduce(crs11), to_apply=add
ROOT tuple = (f32[128], f32[128]) tuple(crs02, crs12)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 6);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 4);
EXPECT_TRUE(changed);
auto crs0 = op::AllReduce(op::Parameter(0), op::AllReduce(op::Parameter(1)));
auto add = op::Add(op::AllReduce(op::GetTupleElement(crs0, 0)),
op::GetTupleElement(crs0, 1));
auto crs1 = op::AllReduce(add, op::GetTupleElement(crs0));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(crs1, 0), op::GetTupleElement(crs1, 1)));
}
}
} |
1,865 | cpp | tensorflow/tensorflow | host_offloader | third_party/xla/xla/service/host_offloader.cc | third_party/xla/xla/service/host_offloader_test.cc | #ifndef XLA_SERVICE_HOST_OFFLOADER_H_
#define XLA_SERVICE_HOST_OFFLOADER_H_
#include <cstdint>
#include <memory>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class HloCostAnalysis;
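// Identifies a single buffer: an instruction together with the shape index of
// the sub-shape it produces.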
struct InstructionAndShapeIndex {
explicit InstructionAndShapeIndex(HloInstruction* instruction)
: instruction(instruction) {}
InstructionAndShapeIndex(HloInstruction* instruction, ShapeIndex shape_index)
: instruction(instruction), shape_index(shape_index) {}
HloInstruction* instruction;
ShapeIndex shape_index;
std::string ToString() const;
template <typename H>
static H Hash(H h, const InstructionAndShapeIndex& i) {
h = H::combine(std::move(h), i.instruction);
h = H::combine(std::move(h), i.shape_index);
return std::move(h);
}
template <typename H>
friend H AbslHashValue(H h, const InstructionAndShapeIndex& i) {
return InstructionAndShapeIndex::Hash(std::move(h), i);
}
};
bool operator==(const InstructionAndShapeIndex& lhs,
const InstructionAndShapeIndex& rhs);
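// Pass that turns MoveToHost / MoveToDevice custom-call annotations into
// actual host-memory placement: it colors the affected buffers with the host
// memory space, validates the offload and reload paths, and inserts the
// copies needed to move data between device and host.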
class HostOffloader : public HloModulePass {
public:
explicit HostOffloader(int64_t host_memory_space_color)
: kHostMemorySpaceColor(host_memory_space_color) {}
~HostOffloader() override = default;
absl::string_view name() const override { return "host-offloader"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const int64_t kHostMemorySpaceColor;
absl::flat_hash_set<HloInstruction*>
already_visited_move_to_host_custom_calls_;
absl::flat_hash_set<HloInstruction*> dynamic_update_slices_already_allocated_;
absl::flat_hash_set<HloInstruction*> validated_slices_;
absl::flat_hash_map<HloInstruction*, HloInstruction*> copies_created_after_;
absl::flat_hash_set<HloInstruction*> move_to_device_custom_calls_to_remove_;
absl::flat_hash_set<InstructionAndShapeIndex> already_inserted_copy_before_;
absl::StatusOr<HloInstruction*> DynamifySlice(HloInstruction* slice);
bool IsValidDuringPureMemoryOffload(const HloInstruction* instruction) const;
bool InstructionIsAllowedBetweenMoveToHostAndDus(
const HloInstruction* instruction) const;
bool InstructionIsAllowedBetweenDsAndMoveToDevice(
const HloInstruction* instruction) const;
absl::StatusOr<bool> HandleInputStreaming(HloComputation* entry_computation);
absl::StatusOr<bool> HandleMoveToHostCustomCall(
HloInstruction* custom_call_instruction);
absl::StatusOr<bool> HandleMoveToDeviceCustomCall(
HloInstruction* custom_call_instruction);
absl::Status CreateAllocateBufferForDynamicUpdateSlice(
HloInstruction* dynamic_update_slice);
absl::Status ValidateSliceLeadsToMoveToDeviceCustomCall(
HloInstruction* slice);
absl::StatusOr<bool> WalkDownHostMemoryOffloadPaths(
const InstructionAndShapeIndex& starting_instruction_and_index,
bool insert_copy_before);
absl::StatusOr<std::vector<InstructionAndShapeIndex>> GetStartingInstructions(
HloInstruction* custom_call_instruction);
absl::StatusOr<bool> InsertCopyBetween(
const InstructionAndShapeIndex& before_instruction_and_index,
const InstructionAndShapeIndex& after_instruction_and_index);
absl::StatusOr<bool> ApplySchedulingFix(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
};
}
#endif
#include "xla/service/host_offloader.h"
#include <array>
#include <cstdint>
#include <iomanip>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_value.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::xla::host_memory_offload_annotations::kMoveToDeviceCustomCallTarget;
using ::xla::host_memory_offload_annotations::kMoveToHostCustomCallTarget;
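// Sets the memory space color on the layout of `shape`; the shape must
// already have a layout.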
void SetMemorySpace(Shape* shape, int64_t memory_space_color) {
CHECK(shape->has_layout());
shape->mutable_layout()->set_memory_space(memory_space_color);
}
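// Applies `memory_space_color` to every listed (instruction, shape index)
// buffer. Returns true if any layout was modified.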
bool SetBuffersToMemorySpaceColor(
const std::vector<InstructionAndShapeIndex>& buffers_to_set_to_host_memory,
int64_t memory_space_color) {
bool changed = false;
for (const auto& instr_and_shape : buffers_to_set_to_host_memory) {
VLOG(2) << absl::StreamFormat("Setting %s to memory space %d",
instr_and_shape.ToString(),
memory_space_color);
Shape* shape = ShapeUtil::GetMutableSubshape(
instr_and_shape.instruction->mutable_shape(),
instr_and_shape.shape_index);
CHECK(shape->has_layout()) << "Shape must have a layout";
SetMemorySpace(ShapeUtil::GetMutableSubshape(
instr_and_shape.instruction->mutable_shape(),
instr_and_shape.shape_index),
memory_space_color);
changed = true;
}
return changed;
}
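// Returns true if the custom call's output aliases the buffer of the operand
// at `operand_index`, either because it is a MoveToHost/MoveToDevice
// annotation or because it declares an output-operand alias for that operand.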
bool CustomCallReusesBuffer(const HloInstruction* custom_call,
int64_t operand_index) {
if (custom_call->custom_call_target() == kMoveToDeviceCustomCallTarget ||
custom_call->custom_call_target() == kMoveToHostCustomCallTarget) {
return true;
}
const std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>&
aliases = custom_call->output_operand_aliasing();
for (const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>& alias :
aliases) {
int64_t alias_operand_index = alias.second.first;
if (alias_operand_index == operand_index) {
return true;
}
}
return false;
}
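// Returns the (instruction, shape index) pairs that consume the given buffer,
// following tuples, get-tuple-elements, calls, while loops, async ops and
// buffer-reusing custom calls, including across computation boundaries when
// the instruction is a computation root.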
absl::StatusOr<std::vector<InstructionAndShapeIndex>> GetSuccessors(
const InstructionAndShapeIndex& instruction_and_shape_index) {
std::vector<InstructionAndShapeIndex> result;
HloInstruction* instruction = instruction_and_shape_index.instruction;
if (instruction->IsRoot()) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(instruction->GetModule());
auto callers = call_graph->GetComputationCallers(instruction->parent());
for (HloInstruction* caller : callers) {
result.push_back({caller, instruction_and_shape_index.shape_index});
}
}
for (HloInstruction* user : instruction->users()) {
if (user->opcode() == HloOpcode::kTuple) {
auto operand_indices = user->OperandIndices(instruction);
for (const auto i : operand_indices) {
auto tmp_shape_index = instruction_and_shape_index.shape_index;
tmp_shape_index.push_back(i);
result.push_back({user, std::move(tmp_shape_index)});
}
} else if (user->opcode() == HloOpcode::kGetTupleElement) {
ShapeIndex tmp_shape_index = instruction_and_shape_index.shape_index;
const auto index = tmp_shape_index.front();
if (index == user->tuple_index()) {
tmp_shape_index.pop_front();
result.push_back({user, std::move(tmp_shape_index)});
}
} else if (user->opcode() == HloOpcode::kCall) {
auto operand_indices = user->OperandIndices(instruction);
CHECK(user->called_computations().size() == 1)
<< "Expect call to only have one called computation.";
for (const auto i : operand_indices) {
HloComputation* called_computation =
user->called_computations().front();
HloInstruction* parameter_instruction =
called_computation->parameter_instruction(i);
result.push_back(
{parameter_instruction, instruction_and_shape_index.shape_index});
}
} else if (user->opcode() == HloOpcode::kWhile) {
auto operand_indices = user->OperandIndices(instruction);
HloComputation* while_body_computation = user->while_body();
HloComputation* while_condition_computation = user->while_condition();
for (const auto i : operand_indices) {
HloInstruction* parameter_instruction =
while_body_computation->parameter_instruction(i);
result.push_back(
{parameter_instruction, instruction_and_shape_index.shape_index});
HloInstruction* condition_instruction =
while_condition_computation->parameter_instruction(i);
result.push_back(
{condition_instruction, instruction_and_shape_index.shape_index});
}
} else if (user->opcode() == HloOpcode::kAsyncStart) {
auto operand_indices = user->OperandIndices(instruction);
CHECK(user->called_computations().size() == 1)
<< "Expect async-start to only have one called computation.";
for (const auto i : operand_indices) {
HloComputation* called_computation =
user->called_computations().front();
HloInstruction* parameter_instruction =
called_computation->parameter_instruction(i);
result.push_back(
{parameter_instruction, instruction_and_shape_index.shape_index});
}
} else if (user->opcode() == HloOpcode::kCustomCall) {
const auto operand_indices = user->OperandIndices(instruction);
bool found_one = false;
for (const auto i : operand_indices) {
if (CustomCallReusesBuffer(user, i)) {
if (found_one) {
return absl::InternalError(
"Found multiple operands of a custom call that reuse the same "
"output buffer.");
}
result.push_back({user, instruction_and_shape_index.shape_index});
found_one = true;
}
}
} else {
result.push_back({user, instruction_and_shape_index.shape_index});
}
}
return result;
}
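// Returns the (instruction, shape index) pairs that produce the given buffer,
// walking backwards through get-tuple-elements, tuples, calls, parameters,
// while loops, dynamic-slice and dynamic-update-slice operands.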
std::vector<InstructionAndShapeIndex> GetPredecessors(
const InstructionAndShapeIndex& instruction_and_shape_index) {
std::vector<InstructionAndShapeIndex> result;
HloInstruction* instruction = instruction_and_shape_index.instruction;
if (instruction->opcode() == HloOpcode::kGetTupleElement) {
const int64_t index = instruction->tuple_index();
auto tmp_shape_index = instruction_and_shape_index.shape_index;
tmp_shape_index.push_front(index);
result.push_back({instruction->mutable_operand(0), tmp_shape_index});
} else if (instruction->opcode() == HloOpcode::kTuple) {
CHECK(!instruction_and_shape_index.shape_index.empty())
<< "Did not store an index before encountering a tuple.";
auto tmp_shape_index = instruction_and_shape_index.shape_index;
const int64_t index = tmp_shape_index.front();
tmp_shape_index.pop_front();
result.push_back({instruction->mutable_operand(index), tmp_shape_index});
} else if (instruction->opcode() == HloOpcode::kCall) {
CHECK(instruction->called_computations().size() == 1)
<< "Expect call to only have one called computation.";
HloComputation* called_computation =
instruction->called_computations().front();
result.push_back({called_computation->root_instruction(),
instruction_and_shape_index.shape_index});
} else if (instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(instruction->GetModule());
auto callers = call_graph->GetComputationCallers(instruction->parent());
for (HloInstruction* caller : callers) {
result.push_back(
{caller->mutable_operand(instruction->parameter_number()),
instruction_and_shape_index.shape_index});
}
} else if (instruction->opcode() == HloOpcode::kDynamicSlice) {
result.push_back({instruction->mutable_operand(0),
instruction_and_shape_index.shape_index});
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
result.push_back({instruction->mutable_operand(0),
instruction_and_shape_index.shape_index});
} else if (instruction->opcode() == HloOpcode::kWhile) {
HloComputation* while_body_computation = instruction->while_body();
result.push_back({while_body_computation->root_instruction(),
instruction_and_shape_index.shape_index});
} else {
CHECK(instruction->operand_count() == 1) << absl::StreamFormat(
"Expecting instruction %s to have 1 operand, but it has %d.",
instruction->name(), instruction->operand_count());
result.push_back({instruction->mutable_operand(0),
instruction_and_shape_index.shape_index});
}
return result;
}
}
bool operator==(const InstructionAndShapeIndex& lhs,
const InstructionAndShapeIndex& rhs) {
return lhs.instruction == rhs.instruction &&
lhs.shape_index == rhs.shape_index;
}
std::string InstructionAndShapeIndex::ToString() const {
return absl::StrFormat("{Instr: %s, ShapeIndex: %s}", instruction->name(),
shape_index.ToString());
}
bool HostOffloader::IsValidDuringPureMemoryOffload(
const HloInstruction* instruction) const {
static constexpr std::array allowed_opcodes = {
HloOpcode::kGetTupleElement,
HloOpcode::kBitcast,
HloOpcode::kTuple,
HloOpcode::kCall,
HloOpcode::kWhile,
HloOpcode::kParameter,
HloOpcode::kOptimizationBarrier,
HloOpcode::kAsyncStart,
HloOpcode::kAsyncDone,
HloOpcode::kCustomCall};
return absl::c_linear_search(allowed_opcodes, instruction->opcode());
}
bool HostOffloader::InstructionIsAllowedBetweenMoveToHostAndDus(
const HloInstruction* instruction) const {
if (instruction->opcode() == HloOpcode::kReshape) {
return ShapeUtil::ReshapeIsBitcast(instruction->operand(0)->shape(),
instruction->shape());
}
return instruction->opcode() == HloOpcode::kBitcast;
}
bool HostOffloader::InstructionIsAllowedBetweenDsAndMoveToDevice(
const HloInstruction* instruction) const {
if (instruction->opcode() == HloOpcode::kReduce) {
return ShapeUtil::TrueRank(instruction->operand(0)->shape()) ==
ShapeUtil::TrueRank(instruction->shape());
}
if (instruction->opcode() == HloOpcode::kReshape) {
return ShapeUtil::ReshapeIsBitcast(instruction->operand(0)->shape(),
instruction->shape());
}
return instruction->opcode() == HloOpcode::kBitcast ||
instruction->opcode() == HloOpcode::kCopy;
}
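// Walks forward from `starting_instruction_and_index`, collecting every buffer
// that has to live in host memory and every rewrite (copy insertion,
// annotation removal, slice dynamification) needed to make the offload legal.
// Returns true if the module was changed.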
absl::StatusOr<bool> HostOffloader::WalkDownHostMemoryOffloadPaths(
const InstructionAndShapeIndex& starting_instruction_and_index,
bool insert_copy_before) {
bool changed = false;
absl::flat_hash_set<HloInstruction*> mth_custom_calls_to_remove;
absl::flat_hash_set<HloInstruction*> slices_to_dynamify;
absl::flat_hash_set<HloInstruction*> custom_calls_to_insert_copies_before;
std::vector<InstructionAndShapeIndex> buffers_to_set_to_host_memory;
std::vector<HloInstruction*> dynamic_update_slices;
HloInstruction* starting_instruction =
starting_instruction_and_index.instruction;
std::queue<InstructionAndShapeIndex> queue;
queue.push(starting_instruction_and_index);
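  // Breadth-first walk over the users of the offloaded value; each visited
  // instruction is either recorded as host-resident or scheduled for a rewrite.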
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape_index = queue.front();
queue.pop();
HloInstruction* instruction = instruction_and_shape_index.instruction;
VLOG(4) << absl::StreamFormat("Visiting instruction: %s",
instruction_and_shape_index.ToString());
bool already_saved_buffer = false;
if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() ==
host_memory_offload_annotations::kMoveToHostCustomCallTarget) {
already_visited_move_to_host_custom_calls_.insert(instruction);
mth_custom_calls_to_remove.insert(instruction);
} else if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() ==
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget) {
custom_calls_to_insert_copies_before.insert(instruction);
continue;
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
if (instruction == starting_instruction) {
dynamic_update_slices.push_back(instruction);
} else {
dynamic_update_slices_already_allocated_.insert(instruction);
}
} else if (IsValidDuringPureMemoryOffload(instruction)) {
if (instruction->opcode() == HloOpcode::kAsyncStart) {
already_saved_buffer = true;
} else if (instruction->opcode() == HloOpcode::kAsyncDone) {
HloInstruction* async_start = instruction->mutable_operand(0);
buffers_to_set_to_host_memory.emplace_back(async_start, ShapeIndex{1});
} else if (instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(instruction->GetModule());
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(instruction->parent());
for (HloInstruction* caller : callers) {
if (caller->opcode() == HloOpcode::kAsyncStart) {
ShapeIndex tmp_index = instruction_and_shape_index.shape_index;
tmp_index.push_front(instruction->parameter_number());
          tmp_index.push_front(0);  // Tuple index 0 of an async-start holds its operands.
buffers_to_set_to_host_memory.emplace_back(caller, tmp_index);
}
}
}
} else if (instruction->opcode() == HloOpcode::kDynamicSlice) {
TF_RETURN_IF_ERROR(
ValidateSliceLeadsToMoveToDeviceCustomCall(instruction));
continue;
} else if (instruction->opcode() == HloOpcode::kSlice) {
TF_RETURN_IF_ERROR(
ValidateSliceLeadsToMoveToDeviceCustomCall(instruction));
slices_to_dynamify.insert(instruction);
continue;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("Tensor which is moved to host (starting from "
"\"%s\") is used by an instruction (\"%s\") which is "
"not acceptable during pure memory offload.",
starting_instruction->name(), instruction->name()));
}
if (!already_saved_buffer) {
VLOG(5) << "Saving " << instruction_and_shape_index.ToString()
<< " to be set to host memory.";
buffers_to_set_to_host_memory.push_back(instruction_and_shape_index);
}
if (instruction->IsRoot() && instruction->parent()->IsEntryComputation()) {
const Shape& output_shape = ShapeUtil::GetSubshape(
instruction->GetModule()->entry_computation_layout().result_shape(),
instruction_and_shape_index.shape_index);
CHECK(output_shape.has_layout())
<< "Expecting output shape of entry computation to have a layout.";
if (output_shape.layout().memory_space() == kHostMemorySpaceColor) {
VLOG(2) << absl::StreamFormat(
"Memory offloaded starting from %s is output streamed",
starting_instruction_and_index.ToString());
continue;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("Tensor which is moved to host (starting from %s) "
"is returned from the entry computation but the "
"layout for this output is not set to host memory.",
starting_instruction->name()));
}
}
TF_ASSIGN_OR_RETURN(const std::vector<InstructionAndShapeIndex> successors,
GetSuccessors(instruction_and_shape_index));
for (const InstructionAndShapeIndex& successor : successors) {
queue.push(successor);
}
}
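  // The walk above only records work to do; apply it now: color the collected
  // buffers, materialize host buffers for dynamic-update-slices, insert copies
  // around the annotations, drop the MoveToHost custom calls, and rewrite
  // static slices as dynamic-slices.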
const bool set_buffers_changed = SetBuffersToMemorySpaceColor(
buffers_to_set_to_host_memory, kHostMemorySpaceColor);
changed = changed || set_buffers_changed;
for (HloInstruction* dus : dynamic_update_slices) {
TF_RETURN_IF_ERROR(CreateAllocateBufferForDynamicUpdateSlice(dus));
changed = true;
}
if (insert_copy_before) {
const auto predecessors = GetPredecessors(starting_instruction_and_index);
CHECK_EQ(predecessors.size(), 1);
TF_ASSIGN_OR_RETURN(bool inserted_copy,
InsertCopyBetween(predecessors.front(),
starting_instruction_and_index));
changed = changed || inserted_copy;
}
for (HloInstruction* custom_call : custom_calls_to_insert_copies_before) {
HloInstruction* data_to_copy = custom_call->mutable_operand(0);
HloInstruction* copy_to_device =
data_to_copy->parent()->AddInstruction(HloInstruction::CreateUnary(
data_to_copy->shape(), HloOpcode::kCopy, data_to_copy));
SetMemorySpace(copy_to_device->mutable_shape(),
Layout::kDefaultMemorySpace);
VLOG(1) << absl::StreamFormat(
"Inserted copy \"%s\" before custom call \"%s\"",
copy_to_device->name(), custom_call->name());
TF_RETURN_IF_ERROR(custom_call->ReplaceAllUsesWith(copy_to_device));
changed = true;
}
for (HloInstruction* custom_call : mth_custom_calls_to_remove) {
VLOG(1) << absl::StreamFormat("Removing MoveToHost custom call \"%s\"",
custom_call->name());
TF_RETURN_IF_ERROR(
custom_call->ReplaceAllUsesWith(custom_call->mutable_operand(0)));
TF_RETURN_IF_ERROR(custom_call->parent()->RemoveInstruction(custom_call));
changed = true;
}
for (HloInstruction* slice : slices_to_dynamify) {
TF_ASSIGN_OR_RETURN(HloInstruction * dynamic_slice, DynamifySlice(slice));
validated_slices_.insert(dynamic_slice);
changed = true;
}
return changed;
}
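// Entry parameters whose layout already assigns the host memory space are
// streamed inputs; walk down from each such (sub)shape to propagate the host
// memory space through its users.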
absl::StatusOr<bool> HostOffloader::HandleInputStreaming(
HloComputation* entry_computation) {
bool changed = false;
const ComputationLayout& entry_computation_layout =
entry_computation->parent()->entry_computation_layout();
for (int i = 0; i < entry_computation_layout.parameter_count(); ++i) {
if (entry_computation_layout.parameter_shape(i).IsToken()) {
LOG(WARNING) << "Token parameters are not supported for streaming.";
continue;
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
entry_computation_layout.parameter_shape(i),
[&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.has_layout() &&
subshape.layout().memory_space() == kHostMemorySpaceColor) {
HloInstruction* parameter_instruction =
entry_computation->parameter_instruction(i);
VLOG(1) << "Host parameter streamed into program with shape: "
<< subshape.ToString(true) << " at index "
<< index.ToString();
TF_ASSIGN_OR_RETURN(
bool result,
WalkDownHostMemoryOffloadPaths(
InstructionAndShapeIndex(parameter_instruction, index),
false));
changed = changed || result;
}
return absl::OkStatus();
}));
}
return changed;
}
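// Handles a single MoveToHost annotation: finds the instruction(s) where the
// host-memory path actually begins and walks down from each of them.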
absl::StatusOr<bool> HostOffloader::HandleMoveToHostCustomCall(
HloInstruction* custom_call_instruction) {
if (already_visited_move_to_host_custom_calls_.contains(
custom_call_instruction)) {
return false;
}
VLOG(1) << "Offloading " << custom_call_instruction->operand(0)->name()
<< " to host.";
TF_ASSIGN_OR_RETURN(
std::vector<InstructionAndShapeIndex> starting_instruction_and_shapes,
GetStartingInstructions(custom_call_instruction));
if (starting_instruction_and_shapes.empty()) { | #include "xla/service/host_offloader.h"
#include <cstdint>
#include <stack>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/host_offload_legalize.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace {
class HostOffloaderTest : public HloTestBase {
protected:
static constexpr int64_t kHostMemorySpaceColor{5};
absl::StatusOr<bool> RunHostOffloader(HloModule* module,
bool after_layout = false) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
bool changed = false;
HostOffloadLegalize host_offload_legalize(kHostMemorySpaceColor,
after_layout);
TF_ASSIGN_OR_RETURN(bool legal_changed, host_offload_legalize.Run(module));
changed |= legal_changed;
HostOffloader host_offloader(kHostMemorySpaceColor);
TF_ASSIGN_OR_RETURN(bool offload_changed, host_offloader.Run(module));
changed |= offload_changed;
return changed;
}
void TestShapeHasMemorySpace(const Shape& shape, int64_t memory_space) {
ASSERT_TRUE(shape.has_layout());
EXPECT_EQ(shape.layout().memory_space(), memory_space);
}
bool HaveRemainingOffloadAnnotations(const HloModule* module) {
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget,
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget})) {
return true;
}
}
}
return false;
}
};
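// The canonical offload pattern: MoveToHost feeding a dynamic-update-slice
// into a broadcast-created buffer, read back through a dynamic-slice and
// MoveToDevice. The broadcast should become a host-memory AllocateBuffer and
// both annotations should disappear.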
TEST_F(HostOffloaderTest, BasicDusDs) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(dynamic_update_slice->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
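// If the destination of the dynamic-update-slice is an ordinary parameter
// rather than a broadcast, no host buffer can be allocated for it and the pass
// is expected to fail.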
TEST_F(HostOffloaderTest, DusFirstOperandIsNotFromABroadcast) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
param_2 = f32[2,2048,2048] parameter(2)
constant_s32_0 = s32[] constant(0)
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(param_2, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const absl::StatusOr<bool> result = RunHostOffloader(module.get());
EXPECT_FALSE(result.ok());
}
TEST_F(HostOffloaderTest, DusDsWithTupleAfterBroadcast) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
tuple = (f32[2,2048,2048]) tuple(broadcast)
gte = f32[2,2048,2048] get-tuple-element(tuple), index=0
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(gte, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* tuple;
HloInstruction* gte;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::GetTupleElement(
>e,
m::Tuple(&tuple, m::CustomCall(&allocate_buffer,
{"AllocateBuffer"})),
0),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
kHostMemorySpaceColor);
TestShapeHasMemorySpace(gte->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(dynamic_update_slice->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, DusWithoutDs) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
ROOT load_custom_call = f32[2,2048,2048] custom-call(dynamic_update_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* copy;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©, m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(dynamic_update_slice->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, DusAndNoCopyFromSameCustomCall) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
tuple = (f32[1,2048,2048]) tuple(offload_custom_call)
gte = f32[1,2048,2048] get-tuple-element(tuple), index=0
load_custom_call_0 = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
load_custom_call_1 = f32[1,2048,2048] custom-call(gte), custom_call_target="MoveToDevice"
ROOT tuple_1 = (f32[1,2048,2048], f32[1,2048,2048]) tuple(load_custom_call_0, load_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_match_1;
HloInstruction* param_match_2;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
HloInstruction* copy_to_host;
HloInstruction* tuple_0;
HloInstruction* gte;
HloInstruction* copy_to_device;
HloInstruction* tuple_1;
const auto dynamic_slice_pattern = m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m_match_1, 0), m::Op(), m::Op(),
m::Op()),
m::Op(), m::Op(), m::Op());
const auto copy_pattern = m::Copy(
©_to_device,
m::GetTupleElement(
>e,
m::Tuple(&tuple_0,
m::Copy(©_to_host, m::Parameter(¶m_match_2, 0))),
0));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(&tuple_1, dynamic_slice_pattern, copy_pattern)));
EXPECT_EQ(param_match_1, param_match_2);
TestShapeHasMemorySpace(param_match_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(dynamic_update_slice->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_0->shape(), {0}),
kHostMemorySpaceColor);
TestShapeHasMemorySpace(gte->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
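// Host compute offload via an aliased async custom call: the entry layout puts
// both the parameter and the result in host memory (S(5)), so the async-done
// shape must end up colored with the host memory space.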
TEST_F(HostOffloaderTest, BasicAsyncCustomCallWithAliasing) {
const std::string& hlo_string = R"(
HloModule m, input_output_alias={{}: (0, {}, must-alias)},
entry_computation_layout={(f32[4096]{0:T(128)S(5)})->f32[4096]{0:T(128)S(5)}}
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%async-start = ((f32[4096]{0}), f32[4096]{0}, u32[]) custom-call-start(%a),
custom_call_target="Foo",
output_to_operand_aliasing={{}: (0, {})}
ROOT %async-done = f32[4096]{0} custom-call-done(%async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
TestShapeHasMemorySpace(async_done->shape(), kHostMemorySpaceColor);
}
TEST_F(HostOffloaderTest, ParameterStreamingWithXposeCopyFeedingIntoWhile) {
const std::string& hlo_string = R"(
HloModule jit__prefill_impl, entry_computation_layout={(bf16[2,16,16]{2,1,0:T(8,128)(2,1)S(5)})->bf16[2,16,16]{1,2,0:T(8,128)(2,1)}}
while_condition {
condition_param = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=0
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
input_tuple.0 = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
orig_data = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=1
custom-call.0 = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} custom-call(orig_data), custom_call_target="MoveToDevice"
sum = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=2
sum.1 = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} add(custom-call.0, sum)
constant_1 = s32[] constant(1)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1)
ROOT tuple_result.0 = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) tuple(incremented_index.0, custom-call.0, sum.1)
}
ENTRY main {
param.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} parameter(0)
copy = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} copy(param.0)
constant_0 = s32[] constant(0)
constant_0.0 = bf16[] constant(0.0)
broadcast = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} broadcast(constant_0.0), dimensions={}
tuple_for_while = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) tuple(constant_0, copy, broadcast)
while = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) while(tuple_for_while), condition=while_condition, body=while_body
ROOT gte = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true, /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
VLOG(1) << "module after: " << module->ToString();
}
TEST_F(HostOffloaderTest, ParameterStreamingFeedingIntoWhile) {
const std::string& hlo_string = R"(
HloModule jit__prefill_impl, entry_computation_layout={(bf16[2,16,16]{2,1,0:T(8,128)(2,1)S(5)})->bf16[2,16,16]{2,1,0:T(8,128)(2,1)}}
while_condition {
condition_param = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=0
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
input_tuple.0 = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
orig_data = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=1
custom-call.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} custom-call(orig_data), custom_call_target="MoveToDevice"
sum = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=2
sum.1 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} add(custom-call.0, sum)
constant_1 = s32[] constant(1)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1)
ROOT tuple_result.0 = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) tuple(incremented_index.0, custom-call.0, sum.1)
}
ENTRY main {
param.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} parameter(0)
constant_0 = s32[] constant(0)
constant_0.0 = bf16[] constant(0.0)
broadcast = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} broadcast(constant_0.0), dimensions={}
tuple_for_while = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) tuple(constant_0, param.0, broadcast)
while = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) while(tuple_for_while), condition=while_condition, body=while_body
ROOT gte = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true, /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
VLOG(1) << "module after: " << module->ToString();
}
TEST_F(HostOffloaderTest, ParameterStreamingInScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(f32[8,2]{0,1:T(2,128)S(5)})->(f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)})},
allow_spmd_sharding_propagation_to_output={true,true}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
while_body {
arg_tuple.8 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) parameter(0)
get-tuple-element.9 = s32[]{:T(256)} get-tuple-element(arg_tuple.8), index=0
constant.12 = s32[]{:T(256)} constant(1)
add.29 = s32[]{:T(256)} add(get-tuple-element.9, constant.12)
get-tuple-element.10 = f32[8,2]{0,1:T(2,128)} get-tuple-element(arg_tuple.8), index=1
get-tuple-element.11 = f32[8,2]{0,1:T(2,128)} get-tuple-element(arg_tuple.8), index=2
constant.16 = s32[]{:T(256)} constant(0)
dynamic-slice.20 = f32[1,2]{0,1:T(2,128)} dynamic-slice(get-tuple-element.11, get-tuple-element.9, constant.16), dynamic_slice_sizes={1,2}
constant.1 = f32[] constant(-0)
reduce = f32[2]{0:T(256)} reduce(dynamic-slice.20, constant.1), dimensions={0}, to_apply=add
custom-call = f32[2]{0:T(256)} custom-call(reduce), custom_call_target="MoveToDevice"
constant.13 = f32[]{:T(256)} constant(1)
broadcast.14 = f32[2]{0:T(256)} broadcast(constant.13), dimensions={}
add.23 = f32[2]{0:T(256)} add(custom-call, broadcast.14)
reshape.24 = f32[1,2]{0,1:T(2,128)} reshape(add.23)
dynamic-update-slice.28 = f32[8,2]{0,1:T(2,128)} dynamic-update-slice(get-tuple-element.10, reshape.24, get-tuple-element.9, constant.16)
ROOT tuple.30 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) tuple(add.29, dynamic-update-slice.28, get-tuple-element.11)
}
condition {
arg_tuple.32 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) parameter(0)
get-tuple-element.33 = s32[]{:T(256)} get-tuple-element(arg_tuple.32), index=0
constant.36 = s32[]{:T(256)} constant(8)
ROOT compare.37 = pred[]{:T(1024)} compare(get-tuple-element.33, constant.36), direction=LT
}
ENTRY e {
constant.3 = f32[]{:T(256)} constant(1)
constant.2 = s32[]{:T(256)} constant(0)
constant.4 = f32[]{:T(256)} constant(0)
broadcast.5 = f32[8,2]{0,1:T(2,128)} broadcast(constant.4), dimensions={}
Arg_0.1 = f32[8,2]{0,1:T(2,128)} parameter(0), sharding={replicated}
tuple.6 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.2, broadcast.5, Arg_0.1)
while.38 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) while(tuple.6), condition=condition, body=while_body
get-tuple-element.40 = f32[8,2]{0,1:T(2,128)} get-tuple-element(while.38), index=1
ROOT tuple.42 = (f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.3, get-tuple-element.40)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true, /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
}
TEST_F(HostOffloaderTest, OutputStreamingInScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(f32[4,1]{0,1:T(2,128)})->(f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)S(5)})},
allow_spmd_sharding_propagation_to_output={true,true}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
while_body {
param.1 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) parameter(0)
get-tuple-element.1 = s32[]{:T(256)} get-tuple-element(param.1), index=0
constant.9 = s32[]{:T(256)} constant(1)
add.1 = s32[]{:T(256)} add(get-tuple-element.1, constant.9)
get-tuple-element.2 = f32[8,2]{0,1:T(2,128)} get-tuple-element(param.1), index=1
get-tuple-element.3 = f32[4,1]{0,1:T(2,128)} get-tuple-element(param.1), index=2
bitcast = f32[1,4,1]{1,2,0:T(2,128)} bitcast(get-tuple-element.3)
all-gather.2 = f32[4,4,1]{1,2,0:T(2,128)} all-gather(bitcast), channel_id=2, replica_groups={{0,1,2,3}}, dimensions={0}, use_global_device_ids=true
constant.20 = f32[] constant(-0)
reduce = f32[4,4]{1,0:T(4,128)} reduce(all-gather.2, constant.20), dimensions={2}, to_apply=add
bitcast.1 = f32[2,4,2,1]{1,2,0,3:T(2,128)} bitcast(reduce)
copy.1 = f32[2,4,2,1]{1,0,2,3:T(2,128)} copy(bitcast.1)
reshape.6 = f32[8,2]{0,1:T(2,128)} reshape(copy.1)
constant.10 = s32[]{:T(256)} constant(0)
dynamic-slice.0 = f32[1,2]{0,1:T(2,128)} dynamic-slice(reshape.6, get-tuple-element.1, constant.10), dynamic_slice_sizes={1,2}
constant.11 = f32[]{:T(256)} constant(1)
broadcast.4 = f32[1,2]{0,1:T(2,128)} broadcast(constant.11), dimensions={}
add.2 = f32[1,2]{0,1:T(2,128)} add(dynamic-slice.0, broadcast.4)
reduce.1 = f32[2]{0:T(256)} reduce(add.2, constant.20), dimensions={0}, to_apply=add
custom-call.1 = f32[2]{0:T(256)} custom-call(reduce.1), custom_call_target="MoveToHost"
reshape.8 = f32[1,2]{0,1:T(2,128)} reshape(custom-call.1)
dynamic-update-slice.0 = f32[8,2]{0,1:T(2,128)} dynamic-update-slice(get-tuple-element.2, reshape.8, get-tuple-element.1, constant.10)
ROOT tuple = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) tuple(add.1, dynamic-update-slice.0, get-tuple-element.3)
}
condition {
param = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) parameter(0)
get-tuple-element = s32[]{:T(256)} get-tuple-element(param), index=0
constant.8 = s32[]{:T(256)} constant(8)
ROOT compare.0 = pred[]{:T(1024)} compare(get-tuple-element, constant.8), direction=LT
}
ENTRY e {
constant.17 = f32[]{:T(256)} constant(1)
constant.18 = s32[]{:T(256)} constant(0)
constant.19 = f32[]{:T(256)} constant(0)
broadcast.6 = f32[8,2]{0,1:T(2,128)} broadcast(constant.19), dimensions={}
param.2 = f32[4,1]{0,1:T(2,128)} parameter(0), sharding={devices=[2,2]<=[4]}
tuple.1 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) tuple(constant.18, broadcast.6, param.2)
while = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) while(tuple.1), condition=condition, body=while_body
get-tuple-element.4 = f32[8,2]{0,1:T(2,128)} get-tuple-element(while), index=1
ROOT tuple.2 = (f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.17, get-tuple-element.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true, /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
}
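// A MoveToHost immediately followed by a MoveToDevice lowers to a pair of
// copies: one into host memory and one back to device memory.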
TEST_F(HostOffloaderTest, BasicNoCopy) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
ROOT load_custom_call = f32[2048] custom-call(offload_custom_call), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(©_to_device,
m::Copy(©_to_host, m::Parameter(¶m, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyThroughTuple) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
other_param = f32[2048] parameter(1)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple = (f32[2048], f32[2048]) tuple(offload_custom_call, other_param)
gte_0 = f32[2048] get-tuple-element(tuple), index=0
gte_1 = f32[2048] get-tuple-element(tuple), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* tuple;
HloInstruction* gte;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©_to_device,
m::GetTupleElement(
>e,
m::Tuple(&tuple, m::Copy(©_to_host, m::Parameter(¶m, 0)),
m::Op()),
0))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
kHostMemorySpaceColor);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte->shape(), kHostMemorySpaceColor);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyThroughNestedTuple) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
other_param_0 = f32[2048] parameter(1)
other_param_1 = f32[2048] parameter(2)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple_0 = (f32[2048], f32[2048]) tuple(offload_custom_call, other_param_0)
tuple_1 = ((f32[2048], f32[2048]), f32[2048]) tuple(tuple_0, other_param_1)
gte_0 = (f32[2048], f32[2048]) get-tuple-element(tuple_1), index=0
gte_1 = f32[2048] get-tuple-element(tuple_1), index=1
gte_2 = f32[2048] get-tuple-element(gte_0), index=0
gte_3 = f32[2048] get-tuple-element(gte_0), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_2), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* par |
1,866 | cpp | tensorflow/tensorflow | all_reduce_simplifier | third_party/xla/xla/service/all_reduce_simplifier.cc | third_party/xla/xla/service/all_reduce_simplifier_test.cc | #ifndef XLA_SERVICE_ALL_REDUCE_SIMPLIFIER_H_
#define XLA_SERVICE_ALL_REDUCE_SIMPLIFIER_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class AllReduceSimplifier : public HloModulePass {
public:
explicit AllReduceSimplifier(int64_t replica_count)
: replica_count_(replica_count) {}
~AllReduceSimplifier() override = default;
absl::string_view name() const override { return "all-reduce-simp"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
int64_t replica_count_;
};
}
#endif
#include "xla/service/all_reduce_simplifier.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<bool> AllReduceSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(
auto replication,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/false));
std::vector<std::pair<HloInstruction*, int64_t>> all_reduces_to_replace;
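  // Returns the participant count shared by all replica groups of the
  // all-reduce, or -1 if the groups are empty or do not all have the same size.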
auto get_participant_counts_for_replica_group =
[](const HloInstruction* all_reduce) -> absl::StatusOr<int64_t> {
const HloModuleConfig& config = all_reduce->GetModule()->config();
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(all_reduce->channel_id().has_value(),
Cast<HloAllReduceInstruction>(all_reduce)
->use_global_device_ids()));
int64_t num_devices = config.num_partitions();
int64_t num_replicas = config.replica_count();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> participant_counts,
GetPariticipantCountsForReplicaGroups(
num_replicas, num_devices,
all_reduce->replica_groups(), group_mode));
if (participant_counts.empty()) {
return -1;
}
if (!absl::c_all_of(participant_counts, [&](int64_t participant_count) {
return participant_count == participant_counts[0];
})) {
return -1;
}
return participant_counts[0];
};
bool changed = false;
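  // An all-gather or reduce-scatter whose output shape matches its operand
  // shape is a no-op and is replaced by its operand.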
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if ((inst->opcode() == HloOpcode::kAllGather ||
inst->opcode() == HloOpcode::kReduceScatter) &&
ShapeUtil::Compatible(inst->shape(), inst->operand(0)->shape())) {
changed = true;
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(inst, inst->mutable_operand(0)));
}
}
}
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if (!inst->shape().IsArray()) {
continue;
}
if (!inst->IsCrossReplicaAllReduce() && !inst->IsCrossModuleAllReduce()) {
continue;
}
TF_ASSIGN_OR_RETURN(int64_t group_size,
get_participant_counts_for_replica_group(inst));
if (group_size == -1 ||
(!inst->IsCrossReplicaAllReduce() && group_size != 1) ||
(!inst->IsCrossReplicaAllReduce() &&
!module->config().use_spmd_partitioning())) {
continue;
}
if (replication->HloInstructionIsReplicatedAt(inst->operand(0), {}) ||
group_size == 1) {
all_reduces_to_replace.push_back({inst, group_size});
}
}
}
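  // Rewrite each all-reduce of a replicated operand: a summing all-reduce
  // becomes a multiply by the group size, while min/max/and/or reductions
  // collapse to the operand itself.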
for (auto all_reduce_and_group_size : all_reduces_to_replace) {
auto all_reduce = all_reduce_and_group_size.first;
const int64_t replica_group_size = all_reduce_and_group_size.second;
if (replica_group_size == 1) {
TF_RETURN_IF_ERROR(all_reduce->parent()->ReplaceInstruction(
all_reduce, all_reduce->mutable_operand(0)));
changed = true;
continue;
}
if (all_reduce->to_apply()->instruction_count() != 3 ||
all_reduce->to_apply()->num_parameters() != 2) {
continue;
}
HloInstruction* replacement;
switch (all_reduce->to_apply()->root_instruction()->opcode()) {
case HloOpcode::kAdd: {
auto multiplier =
all_reduce->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(replica_group_size)));
if (all_reduce->shape().element_type() != S32) {
multiplier = all_reduce->parent()->AddInstruction(
HloInstruction::CreateConvert(
ShapeUtil::ChangeElementType(
multiplier->shape(), all_reduce->shape().element_type()),
multiplier));
}
if (all_reduce->shape().rank() > 0) {
multiplier = all_reduce->parent()->AddInstruction(
HloInstruction::CreateBroadcast(all_reduce->shape(), multiplier,
{}));
}
replacement =
all_reduce->parent()->AddInstruction(HloInstruction::CreateBinary(
all_reduce->shape(), HloOpcode::kMultiply,
all_reduce->mutable_operand(0), multiplier));
break;
}
case HloOpcode::kMinimum:
case HloOpcode::kMaximum:
case HloOpcode::kOr:
case HloOpcode::kAnd:
replacement = all_reduce->mutable_operand(0);
break;
default:
continue;
}
VLOG(2) << "Replacing " << all_reduce->ToString() << " with "
<< replacement->ToString();
TF_RETURN_IF_ERROR(all_reduce->ReplaceAllUsesWith(replacement));
changed = true;
}
return changed;
}
} | #include "xla/service/all_reduce_simplifier.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/window_util.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using AllReduceSimplifierTest = HloTestBase;
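// An all-reduce over an operand that is replicated across all participants can
// be rewritten locally; non-replicated operands must keep their all-reduce.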
TEST_F(AllReduceSimplifierTest, ReplicatedParameters) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
min {
a.2 = f32[] parameter(0)
b.2 = f32[] parameter(1)
ROOT min = f32[] minimum(a.2, b.2)
}
sum.1 {
a.3 = f32[] parameter(0)
b.3 = f32[] parameter(1)
ROOT add.1 = f32[] add(a.3, b.3)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={true}
p1 = f32[8,16] parameter(1), parameter_replication={false}
p2 = f32[] parameter(2), parameter_replication={true}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=sum
all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max
all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={}, to_apply=min
all-reduce.3 = f32[] all-reduce(p2), replica_groups={}, to_apply=sum.1
ROOT tuple = (f32[8,16], f32[8,16], f32[8,16], f32[]) tuple(all-reduce, all-reduce.1, all-reduce.2, all-reduce.3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::MultiplyAnyOrder(m::Parameter(0),
m::Broadcast(m::Convert(m::ConstantScalar(8)))),
m::Parameter(0), m::AllReduce(m::Parameter(1)),
m::MultiplyAnyOrder(m::Parameter(2),
m::Convert(m::ConstantScalar(8))))));
}
TEST_F(AllReduceSimplifierTest, AllReduceAfterAllReduce) {
const char* kModuleStr = R"(
HloModule m
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max
ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce), replica_groups={}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::MultiplyAnyOrder(
m::AllReduce(m::Parameter(0)),
m::Broadcast(m::Convert(m::ConstantScalar(8))))));
}
TEST_F(AllReduceSimplifierTest, SubgroupAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
min {
a.2 = f32[] parameter(0)
b.2 = f32[] parameter(1)
ROOT min = f32[] minimum(a.2, b.2)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={true}
p1 = f32[8,16] parameter(1), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum
all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=max
all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=min
ROOT tuple = (f32[8,16], f32[8,16], f32[8,16]) tuple(all-reduce, all-reduce.1, all-reduce.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::MultiplyAnyOrder(m::Parameter(0),
m::Broadcast(m::Convert(m::ConstantScalar(4)))),
m::Parameter(0), m::AllReduce(m::Parameter(1)))));
}
TEST_F(AllReduceSimplifierTest, TrivialSubgroupAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},
to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
EXPECT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(AllReduceSimplifierTest, TrivialSubgroupNonCrossReplicaAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
use_global_device_ids=true,
replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},
to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/1,
                                                /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
AllReduceSimplifier simplifier(1);
EXPECT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(AllReduceSimplifierTest, NonCrossReplicaAllReduceAfterAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
use_global_device_ids=true,
replica_groups={{0,2},{1,3},{4,6},{5,7}},
to_apply=sum
ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce),
channel_id=2,
use_global_device_ids=true,
replica_groups={{0,4},{1,5},{2,6},{3,7}},
to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/1,
                                                /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
AllReduceSimplifier simplifier(1);
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
TEST_F(AllReduceSimplifierTest, MPMDNonCrossReplicaAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
replica_groups={{0},{1}},
to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/2,
                                                /*num_partitions=*/1));
module->mutable_config().set_use_spmd_partitioning(false);
AllReduceSimplifier simplifier(2);
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
}
} |
1,867 | cpp | tensorflow/tensorflow | while_loop_invariant_code_motion | third_party/xla/xla/service/while_loop_invariant_code_motion.cc | third_party/xla/xla/service/while_loop_invariant_code_motion_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_INVARIANT_CODE_MOTION_H_
#define XLA_SERVICE_WHILE_LOOP_INVARIANT_CODE_MOTION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/compile_time_cap.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
class WhileLoopInvariantCodeMotion : public HloModulePass {
public:
using ShapeSizeFunction = std::function<int64_t(const Shape&)>;
explicit WhileLoopInvariantCodeMotion(
bool hoist_constants = false, bool hoist_reshapes = false,
bool hoist_other = true,
std::optional<float> hoist_size_inflation_ratio = std::nullopt,
ShapeSizeFunction shape_size_function = ShapeUtil::ByteSizeOfElements)
: hoist_constants_(hoist_constants),
hoist_reshapes_(hoist_reshapes),
hoist_other_(hoist_other),
hoist_size_inflation_ratio_(hoist_size_inflation_ratio),
shape_size_function_(shape_size_function) {}
~WhileLoopInvariantCodeMotion() override = default;
absl::string_view name() const override {
return "while-loop-invariant-code-motion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
bool NotWorthHoistingIndividually(const HloInstruction& instruction);
absl::StatusOr<bool> TryHoistingInvariantInstructionsFromWhileBody(
HloInstruction* while_instr, BoundNonLinearCompilerAnalysis* allowance);
bool hoist_constants_;
bool hoist_reshapes_;
bool hoist_other_;
std::optional<float> hoist_size_inflation_ratio_;
ShapeSizeFunction shape_size_function_;
};
}
#endif
#include "xla/service/while_loop_invariant_code_motion.h"
#include <cstdint>
#include <iterator>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/compile_time_cap.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/service/while_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::flat_hash_map;
using absl::flat_hash_set;
using absl::InlinedVector;
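// Clones `to_hoist` together with its transitive operands from the while body
// into the computation containing the while, rewriting uses of the loop
// parameter into uses of the while operand.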
static void CreateLoopInvariantCopy(
flat_hash_map<HloInstruction*, HloInstruction*>* hoisted_instructions,
flat_hash_set<HloInstruction*>* unhoisted_invariant_instructions,
HloInstruction* while_instr, HloInstruction* to_hoist) {
HloComputation* parent_of_while = while_instr->parent();
HloComputation* while_body = while_instr->while_body();
struct DFSFrame {
HloInstruction* instruction;
int64_t operand_index;
};
InlinedVector<DFSFrame, 8> dfs_stack;
dfs_stack.push_back({to_hoist, 0});
HloInstruction* while_body_param = while_body->parameter_instruction(0);
HloInstruction* while_operand = while_instr->mutable_operand(0);
do {
DFSFrame* frame = &dfs_stack.back();
if (frame->operand_index == frame->instruction->operand_count()) {
HloInstruction* old_instruction = frame->instruction;
auto get_new_operand = [&](HloInstruction* old_operand) {
return old_operand == while_body_param
? while_operand
: FindOrDie(*hoisted_instructions, old_operand);
};
InlinedVector<HloInstruction*, 4> new_operands;
absl::c_transform(old_instruction->operands(),
std::back_inserter(new_operands), get_new_operand);
HloInstruction* new_instruction =
parent_of_while->AddInstruction(old_instruction->CloneWithNewOperands(
old_instruction->shape(), new_operands));
InsertOrDie(hoisted_instructions, old_instruction, new_instruction);
CHECK_EQ(unhoisted_invariant_instructions->erase(old_instruction),
to_hoist != old_instruction &&
old_instruction->opcode() != HloOpcode::kConstant);
dfs_stack.pop_back();
continue;
}
HloInstruction* next_operand =
frame->instruction->mutable_operand(frame->operand_index++);
if (hoisted_instructions->contains(next_operand) ||
next_operand == while_body_param) {
continue;
}
dfs_stack.push_back({next_operand, 0});
} while (!dfs_stack.empty());
}
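// Profitability filter: ops that merely rearrange data are not hoisted on
// their own and are only pulled out as operands of something more substantial;
// constants and reshapes follow the pass's configuration flags.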
bool WhileLoopInvariantCodeMotion::NotWorthHoistingIndividually(
const HloInstruction& instruction) {
if (instruction.IsCustomCall("Sharding")) {
return true;
}
switch (instruction.opcode()) {
default:
return false;
case HloOpcode::kConstant:
return !hoist_constants_;
case HloOpcode::kReshape:
return !hoist_reshapes_;
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kIota:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
return true;
}
}
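// Tries to hoist loop-invariant instructions out of the body of `while_instr`,
// then rewrites the loop so the hoisted values are threaded in as extra
// live-in tuple elements.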
absl::StatusOr<bool>
WhileLoopInvariantCodeMotion::TryHoistingInvariantInstructionsFromWhileBody(
HloInstruction* while_instr, BoundNonLinearCompilerAnalysis* allowance) {
auto print_no_metadata = HloPrintOptions{}.set_print_metadata(false);
if (!while_instr->shape().IsTuple()) {
return false;
}
std::string while_instr_name = while_instr->ToString(print_no_metadata);
VLOG(2) << "Trying to hoist from " << while_instr_name;
auto maybe_upper_bound = ComputeWhileLoopTripCountUpperBound(while_instr);
if (maybe_upper_bound && *maybe_upper_bound <= 1) {
VLOG(2) << "Loop has a trip count of at most 1, skipping.";
return false;
}
HloComputation* while_body = while_instr->while_body();
flat_hash_map<HloInstruction*, HloInstruction*> hoisted_instructions;
flat_hash_set<HloInstruction*> unhoisted_invariant_instructions;
for (auto* instr : WhileUtil::GetInvariantGTEsForWhileBody(*while_body)) {
if (instr->shape().IsArray()) {
InsertOrDie(&unhoisted_invariant_instructions, instr);
}
}
if (unhoisted_invariant_instructions.empty() && !hoist_constants_) {
return false;
}
for (auto* instruction : while_body->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kDomain ||
instruction->IsCustomCall("SPMDFullToShardShape") ||
instruction->IsCustomCall("SPMDShardShapeToFull")) {
return false;
}
}
std::vector<HloInstruction*> instructions_to_replace;
std::vector<HloInstruction*> replacement_instructions;
for (auto* instruction : while_body->MakeInstructionPostOrder()) {
allowance->DeductCost(1);
if (!allowance->ContinueAnalysis()) {
return false;
}
if (instruction->HasSideEffect() ||
instruction->opcode() == HloOpcode::kParameter ||
!instruction->control_predecessors().empty() ||
!instruction->control_successors().empty()) {
continue;
}
if (!hoist_other_ && instruction->opcode() != HloOpcode::kConstant &&
instruction->opcode() != HloOpcode::kReshape) {
continue;
}
if (hoist_size_inflation_ratio_ &&
instruction->opcode() != HloOpcode::kConstant) {
int64_t input_size = 0, output_size = 0;
for (auto* operand : instruction->operands()) {
ShapeUtil::ForEachSubshape(
            operand->shape(), [&input_size, this](const Shape& subshape,
                                                  const ShapeIndex& /*index*/) {
if (subshape.IsArray()) {
input_size += shape_size_function_(subshape);
}
});
}
ShapeUtil::ForEachSubshape(
instruction->shape(),
        [&output_size, this](const Shape& subshape,
                             const ShapeIndex& /*index*/) {
if (subshape.IsArray()) {
output_size += shape_size_function_(subshape);
}
});
if (output_size > input_size * *hoist_size_inflation_ratio_) {
continue;
}
}
auto is_invariant = [&](HloInstruction* op) {
return hoisted_instructions.find(op) != hoisted_instructions.end() ||
unhoisted_invariant_instructions.contains(op) ||
op->opcode() == HloOpcode::kConstant;
};
if (!absl::c_all_of(instruction->operands(), is_invariant)) {
continue;
}
if (NotWorthHoistingIndividually(*instruction)) {
VLOG(2) << "Adding " << instruction->ToString(print_no_metadata)
<< " to unhoisted invariant set.";
if (instruction->opcode() != HloOpcode::kConstant) {
InsertOrDie(&unhoisted_invariant_instructions, instruction);
}
continue;
}
VLOG(2) << "Hoisting " << instruction->ToString(print_no_metadata);
CreateLoopInvariantCopy(&hoisted_instructions,
&unhoisted_invariant_instructions, while_instr,
instruction);
instructions_to_replace.push_back(instruction);
replacement_instructions.push_back(
FindOrDie(hoisted_instructions, instruction));
}
if (instructions_to_replace.empty()) {
return false;
}
TF_ASSIGN_OR_RETURN(
WhileUtil::MakeInstructionsLiveInResult live_in_instructions_result,
WhileUtil::MakeInstructionsLiveIn(while_instr, replacement_instructions));
HloComputation* new_while_body =
live_in_instructions_result.new_while_instr->while_body();
for (int i = 0; i < instructions_to_replace.size(); i++) {
HloInstruction* instruction_to_replace_in_new_while =
FindOrDie(live_in_instructions_result.while_body_instruction_map,
instructions_to_replace[i]);
TF_RETURN_IF_ERROR(new_while_body->ReplaceInstruction(
instruction_to_replace_in_new_while,
live_in_instructions_result.while_body_live_in_values[i]));
}
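  // MakeInstructionsLiveIn rewrote the loop so that the hoisted values are
  // threaded in through the while state; the loop above then replaces each
  // original body instruction with its live-in counterpart in the new body.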
VLOG(1) << "Hoisted " << instructions_to_replace.size()
<< " instructions from " << while_instr_name;
return true;
}
absl::StatusOr<bool> WhileLoopInvariantCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before WhileLoopInvariantCodeMotion:";
XLA_VLOG_LINES(2, module->ToString());
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->MakeComputationPostOrder(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
BoundNonLinearCompilerAnalysis allowance(module, name(), 10);
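  // The allowance bounds the total analysis cost: every body instruction
  // visited by TryHoistingInvariantInstructionsFromWhileBody deducts from it,
  // and once it is exhausted no further loops are examined.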
for (HloInstruction* while_instr : while_instrs) {
if (!allowance.ContinueAnalysis()) {
break;
}
TF_ASSIGN_OR_RETURN(
bool result,
TryHoistingInvariantInstructionsFromWhileBody(while_instr, &allowance));
changed |= result;
}
if (changed) {
HloDCE dce;
TF_RETURN_IF_ERROR(dce.Run(module).status());
}
if (changed) {
VLOG(2) << "HLO module after WhileLoopInvariantCodeMotion:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after WhileLoopInvariantCodeMotion";
}
return changed;
}
} | #include "xla/service/while_loop_invariant_code_motion.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class WhileLoopInvariantCodeMotionTest : public HloTestBase {
public:
HloComputation* MakeAlwaysTrueComputation(const Shape& param_shape,
HloModule* module);
};
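// Finds the unique kWhile instruction in `computation`; the tests below use it
// to inspect the rewritten loop body after the pass has run.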
static void FindOnlyWhileInstruction(HloComputation* computation,
HloInstruction** while_instruction) {
*while_instruction = nullptr;
for (auto* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(*while_instruction, nullptr);
*while_instruction = instr;
}
}
ASSERT_NE(*while_instruction, nullptr);
}
HloComputation* WhileLoopInvariantCodeMotionTest::MakeAlwaysTrueComputation(
const Shape& param_shape, HloModule* module) {
HloComputation::Builder builder(TestName() + ".always_true");
builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
return module->AddEmbeddedComputation(builder.Build());
}
TEST_F(WhileLoopInvariantCodeMotionTest, HoistOneInvariantOperation) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
HloComputation* entry_computation = m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloInstruction* transformed_while;
FindOnlyWhileInstruction(entry_computation, &transformed_while);
EXPECT_THAT(entry_computation->instructions(), Contains(op::Add()));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::Add())));
}
TEST_F(WhileLoopInvariantCodeMotionTest, HoistInvariantOperationTree) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* gte_2_loop_variant = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 2));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
HloInstruction* mul_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kMultiply, add_result, gte_1));
HloInstruction* negate_result =
builder.AddInstruction(HloInstruction::CreateUnary(
scalar_s32, HloOpcode::kNegate, mul_result));
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(4)));
HloInstruction* sub_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kSubtract, negate_result, constant));
HloInstruction* divide_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kDivide, sub_result, gte_2_loop_variant));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, divide_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
HloComputation* entry_computation = m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloInstruction* transformed_while;
FindOnlyWhileInstruction(entry_computation, &transformed_while);
EXPECT_THAT(entry_computation->instructions(),
AllOf(Contains(op::Add()), Contains(op::Multiply()),
Contains(op::Negate()), Contains(op::Subtract()),
Contains(op::Constant()),
Not(Contains(op::Divide()))));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(AnyOf(op::Add(), op::Multiply(), op::Negate(),
op::Subtract(), op::Constant()))));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Contains(op::Divide()));
}
TEST_F(WhileLoopInvariantCodeMotionTest,
DontHoistTriviallyLoopVaryingComputation) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape = ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
builder.AddInstruction(HloInstruction::CreateTuple({gte_0, add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(), Contains(op::Add()));
}
TEST_F(WhileLoopInvariantCodeMotionTest,
DontHoistLoopVaryingComputationWithAlternatingTuples) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_1, gte_0, add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(), Contains(op::Add()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistInstructionWithSideEffects) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
auto token_shape = ShapeUtil::MakeTokenShape();
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, token_shape});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* in_token = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(token_shape, param, 2));
HloInstruction* out_token = builder.AddInstruction(
HloInstruction::CreateOutfeed(scalar_s32, gte_0, in_token, ""));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, out_token}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* scalar_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_s32, "param"));
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateTuple({scalar_param, scalar_param, token}));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, while_inst, 0));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
ASSERT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(),
Contains(op::Outfeed()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistBitcastAlone) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
auto effective_scalar_s32 = ShapeUtil::MakeShape(S32, {1});
auto token_shape = ShapeUtil::MakeTokenShape();
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, token_shape});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* in_token = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(token_shape, param, 2));
HloInstruction* bitcast_inst =
builder.AddInstruction(HloInstruction::CreateUnary(
effective_scalar_s32, HloOpcode::kBitcast, gte_0));
HloInstruction* out_token =
builder.AddInstruction(HloInstruction::CreateOutfeed(
effective_scalar_s32, bitcast_inst, in_token, ""));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, out_token}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* scalar_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_s32, "param"));
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateTuple({scalar_param, scalar_param, token}));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, while_inst, 0));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(),
Contains(op::Outfeed()));
EXPECT_THAT(while_inst->while_body()->instructions(),
Contains(op::Bitcast()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, HoistBitcastIfNeeded) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
auto effective_scalar_s32 = ShapeUtil::MakeShape(S32, {1});
Shape while_shape = ShapeUtil::MakeTupleShape(
{scalar_s32, effective_scalar_s32, effective_scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(effective_scalar_s32, param, 1));
HloInstruction* bitcast_inst =
builder.AddInstruction(HloInstruction::CreateUnary(
effective_scalar_s32, HloOpcode::kBitcast, gte_0));
HloInstruction* add_inst =
builder.AddInstruction(HloInstruction::CreateBinary(
effective_scalar_s32, HloOpcode::kAdd, bitcast_inst, gte_1));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, add_inst}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
HloComputation* entry_computation = m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloInstruction* transformed_while;
FindOnlyWhileInstruction(entry_computation, &transformed_while);
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::Add())));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::Bitcast())));
EXPECT_THAT(entry_computation->instructions(), Contains(op::Add()));
EXPECT_THAT(entry_computation->instructions(), Contains(op::Bitcast()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistControlDependencies) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body;
{
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
TF_ASSERT_OK(param->AddControlDependencyTo(add_result));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, add_result}));
while_body = m->AddEmbeddedComputation(builder.Build());
}
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, BodyHasNonTupleRoot) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape = ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".passthrough");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloComputation* result = m->AddEmbeddedComputation(builder.Build());
result->AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
return result;
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
const char* const kConstantHoistingTestCase = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2]{0}) parameter(0)
p_body.1 = f32[2]{0} get-tuple-element(p_body), index=0
const = f32[2]{0} constant({3, 4})
add.0 = f32[2]{0} add(p_body.1, const)
ROOT root = (f32[2]{0}) tuple(add.0)
}
condition {
p_cond = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2]{0} constant({1, 2})
while_init = (f32[2]{0}) tuple(const_0)
ROOT while = (f32[2]{0}) while(while_init), condition=condition, body=body
}
)";
TEST_F(WhileLoopInvariantCodeMotionTest, HoistsConstantWhenAsked) {
auto m = ParseAndReturnVerifiedModule(kConstantHoistingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
      WhileLoopInvariantCodeMotion{/*hoist_constants=*/true}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
auto wide_param_1 = op::Parameter(0);
auto get_tuple_element_1 = op::GetTupleElement(wide_param_1, 0);
auto tuple_1 = op::Tuple(get_tuple_element_1);
auto get_tuple_element_4 = op::GetTupleElement(tuple_1, 0);
auto get_tuple_element_7 = op::GetTupleElement(wide_param_1, 1);
auto add_1 = op::Add(get_tuple_element_4, get_tuple_element_7);
auto tuple_3 = op::Tuple(add_1);
auto get_tuple_element_8 = op::GetTupleElement(tuple_3, 0);
auto get_tuple_element_9 = op::GetTupleElement(wide_param_1, 1);
auto tuple_4 = op::Tuple(get_tuple_element_8, get_tuple_element_9);
EXPECT_THAT(while_body->root_instruction(), tuple_4);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoesNotHoistConstantByDefault) {
auto m = ParseAndReturnVerifiedModule(kConstantHoistingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoNotHoistOutOfSingleIteration) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param.0, param.0, param.0, param.1)
ROOT while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
const char* const kInflatingTestCase = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[]) parameter(0)
iota = f32[1024, 1024] iota(), iota_dimension=0
add = f32[1024, 1024] add(iota, iota)
constant = f32[] constant(1.0)
reduce = f32[] reduce(f32[1024, 1024] add, f32[] constant), dimensions={0,1}, to_apply=mul
ROOT root = (f32[]) tuple(reduce)
}
condition {
p_cond = (f32[]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param = f32[] parameter(0)
while_init = (f32[]) tuple(param)
ROOT while = (f32[]) while(while_init), condition=condition, body=body
}
)";
TEST_F(WhileLoopInvariantCodeMotionTest, HoistsInflatingByDefault) {
auto m = ParseAndReturnVerifiedModule(kInflatingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
      WhileLoopInvariantCodeMotion(/*hoist_constants=*/true).Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Iota())));
}
TEST_F(WhileLoopInvariantCodeMotionTest, NoHoistInflating) {
auto m = ParseAndReturnVerifiedModule(kInflatingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopInvariantCodeMotion(false,
true,
1.0)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoesNotHoistSPMDFullToShardShape) {
auto m = CreateNewVerifiedModule();
auto array_s32 = ShapeUtil::MakeShape(S32, {4});
Shape while_shape =
ShapeUtil::MakeTupleShape({array_s32, array_s32, array_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(array_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(array_s32, param, 1));
HloInstruction* sharded_gte_1 = builder.AddInstruction(
HloInstruction::CreateCustomCall(array_s32, {gte_1}, "Sharding"));
sharded_gte_1->set_sharding(HloSharding::Tile1D(array_s32, 4));
HloInstruction* manually_sharded_gte_1 =
builder.AddInstruction(HloInstruction::CreateCustomCall(
array_s32, {sharded_gte_1}, "SPMDFullToShardShape"));
manually_sharded_gte_1->set_sharding(HloSharding::Manual());
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
array_s32, HloOpcode::kAdd, gte_0, manually_sharded_gte_1));
HloInstruction* manually_sharded_add_result = builder.AddInstruction(
HloInstruction::CreateCustomCall(array_s32, {add_result}, "Sharding"));
manually_sharded_add_result->set_sharding(HloSharding::Manual());
HloInstruction* sharded_add_result =
builder.AddInstruction(HloInstruction::CreateCustomCall(
array_s32, {manually_sharded_add_result}, "SPMDShardShapeToFull"));
sharded_add_result->set_sharding(HloSharding::Tile1D(array_s32, 4));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, sharded_add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
LOG(INFO) << "my_test: " << m->ToString();
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoesNotHoistShardingCustomCalls) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], f32[2], s32[]) parameter(0)
gte.0 = f32[2] get-tuple-element(p_body), index=0
gte.1 = f32[2] get-tuple-element(p_body), index=1
sharding.0 = f32[2] custom-call(gte.0), custom_call_target="Sharding", sharding={devices=[2]<=[2]}
sharding.1 = f32[2] custom-call(gte.1), custom_call_target="Sharding", sharding={replicated}
add.0 = f32[2] add(sharding.0, sharding.1)
gte.2 = s32[] get-tuple-element(p_body), index=2
const = s32[] constant(1)
add.1 = s32[] add(gte.2, const)
ROOT root = (f32[2], f32[2], s32[]) tuple(gte.0, add.0, add.1)
}
condition {
p_cond = (f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=2
const = s32[] constant(5)
ROOT result = pred[] compare(gte, const), direction=LT
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], f32[2], s32[]) tuple(param.0, param.0, param.1)
ROOT while = (f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
}
} |
1,868 | cpp | tensorflow/tensorflow | dynamic_index_splitter | third_party/xla/xla/service/dynamic_index_splitter.cc | third_party/xla/xla/service/dynamic_index_splitter_test.cc | #ifndef XLA_SERVICE_DYNAMIC_INDEX_SPLITTER_H_
#define XLA_SERVICE_DYNAMIC_INDEX_SPLITTER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class DynamicIndexSplitter : public HloModulePass {
public:
DynamicIndexSplitter() = default;
absl::string_view name() const override { return "dynamic-index-splitter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/dynamic_index_splitter.h"
#include <map>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
namespace xla {
absl::StatusOr<bool> DynamicIndexSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloComputation*> computations =
module->MakeNonfusionComputations(execution_threads);
for (HloComputation* computation : computations) {
for (HloInstruction* dynamic_op : computation->MakeInstructionPostOrder()) {
switch (dynamic_op->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
break;
default:
continue;
}
auto parent = dynamic_op->parent();
bool is_update = dynamic_op->opcode() == HloOpcode::kDynamicUpdateSlice;
int64_t num_indices = dynamic_op->operand(0)->shape().rank();
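      // A rank-0 operand carries no indices to split: the dynamic op is a
      // no-op, so it is simply replaced by its operand (or by the update, for
      // dynamic-update-slice).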
if (num_indices == 0) {
if (is_update) {
TF_CHECK_OK(parent->ReplaceInstruction(
dynamic_op, dynamic_op->mutable_operand(1)));
} else {
TF_CHECK_OK(parent->ReplaceInstruction(
dynamic_op, dynamic_op->mutable_operand(0)));
}
changed = true;
continue;
}
int64_t index_operand_number =
Cast<HloDynamicIndexInstruction>(dynamic_op)
->first_index_operand_number();
auto index_operand = dynamic_op->mutable_operand(index_operand_number);
if (ShapeUtil::IsScalar(index_operand->shape())) {
continue;
}
TF_RET_CHECK(index_operand->shape().rank() == 1);
auto index_element_type = index_operand->shape().element_type();
std::vector<HloInstruction*> index_array;
index_array.reserve(num_indices);
for (int64_t dim = 0; dim < num_indices; ++dim) {
auto slice = parent->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(index_element_type, {1}), index_operand, {dim},
{dim + 1}, {1}));
auto bitcast = parent->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(index_element_type, {}), slice));
index_array.push_back(bitcast);
}
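      // `index_array` now holds one scalar index per dimension, each obtained
      // by slicing the rank-1 index operand and reshaping the slice to a
      // scalar.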
auto new_dynamic_op =
is_update
? HloInstruction::CreateDynamicUpdateSlice(
dynamic_op->shape(), dynamic_op->mutable_operand(0),
dynamic_op->mutable_operand(1), absl::MakeSpan(index_array))
: HloInstruction::CreateDynamicSlice(
dynamic_op->shape(), dynamic_op->mutable_operand(0),
absl::MakeSpan(index_array),
dynamic_op->dynamic_slice_sizes());
TF_CHECK_OK(parent->ReplaceWithNewInstruction(dynamic_op,
std::move(new_dynamic_op)));
changed = true;
}
}
return changed;
}
} | #include "xla/service/dynamic_index_splitter.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class DynamicIndexSplitterTest : public HloTestBase {};
TEST_F(DynamicIndexSplitterTest, DynamicSlice) {
const char* const kDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY entry (operand: s32[4,5,6], indices: s32[3]) -> s32[1,1,1] {
operand = s32[4,5,6] parameter(0)
indices = s32[3] parameter(1)
ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, indices), dynamic_slice_sizes={1,1,1}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDynamicSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Parameter(0),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1)))));
for (int i = 0; i < 3; ++i) {
const HloInstruction* slice = module->entry_computation()
->root_instruction()
->operand(i + 1)
->operand(0);
EXPECT_EQ(slice->slice_starts(0), i);
EXPECT_EQ(slice->slice_limits(0), i + 1);
}
}
TEST_F(DynamicIndexSplitterTest, DynamicUpdateSlice) {
const char* const kDynamicUpdateSlice = R"(
HloModule DynamicUpdatedSlice_module
ENTRY entry (operand: s32[4,5,6], indices: s32[3], update: s32[1,1,1]) -> s32[4,5,6] {
operand = s32[4,5,6] parameter(0)
indices = s32[3] parameter(1)
update = s32[1,1,1] parameter(2)
ROOT dynamic-update-slice = s32[4,5,6] dynamic-update-slice(operand, update, indices)
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(kDynamicUpdateSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
op::DynamicUpdateSlice(op::Parameter(0), op::Parameter(2),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1)))));
for (int i = 0; i < 3; ++i) {
const HloInstruction* slice = module->entry_computation()
->root_instruction()
->operand(i + 2)
->operand(0);
EXPECT_EQ(slice->slice_starts(0), i);
EXPECT_EQ(slice->slice_limits(0), i + 1);
}
}
TEST_F(DynamicIndexSplitterTest, AlreadyScalar) {
const char* const kDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY entry (operand: s32[4,5,6], index.0: s32[], index.1: s32[], index.2: s32[]) -> s32[1,1,1] {
operand = s32[4,5,6] parameter(0)
index.0 = s32[] parameter(1)
index.1 = s32[] parameter(2)
index.2 = s32[] parameter(3)
ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, index.0, index.1, index.2), dynamic_slice_sizes={1,1,1}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDynamicSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3)));
}
}
} |
1,869 | cpp | tensorflow/tensorflow | hlo_creation_utils | third_party/xla/xla/service/hlo_creation_utils.cc | third_party/xla/xla/service/hlo_creation_utils_test.cc | #ifndef XLA_SERVICE_HLO_CREATION_UTILS_H_
#define XLA_SERVICE_HLO_CREATION_UTILS_H_
#include <cstddef>
#include <memory>
#include <optional>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
absl::StatusOr<HloInstruction*> MakeUnaryHlo(
HloOpcode opcode, HloInstruction* operand,
const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeBinaryHlo(
HloOpcode opcode, HloInstruction* lhs, HloInstruction* rhs,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
HloInstruction* MakeCopyHlo(HloInstruction* from, const Shape& to);
absl::StatusOr<HloInstruction*> MakeCompareHlo(
Comparison::Direction direction, HloInstruction* lhs, HloInstruction* rhs,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakePadHlo(
HloInstruction* operand, HloInstruction* padding_value,
const PaddingConfig& padding_config, const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeSliceHlo(
HloInstruction* operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices, absl::Span<const int64_t> strides,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeConvolveHlo(
HloInstruction* lhs, HloInstruction* rhs, int64_t feature_group_count,
int64_t batch_group_count, const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config,
std::optional<PrimitiveType> preferred_element_type,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeTransposeHlo(
HloInstruction* operand, absl::Span<const int64_t> dimensions);
absl::StatusOr<HloInstruction*> MakeReshapeHlo(const Shape& result_shape,
HloInstruction* operand);
absl::StatusOr<HloInstruction*> MakeReshapeHlo(
absl::Span<const int64_t> result_shape_dim_bounds, HloInstruction* operand);
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
HloInstruction* operand, absl::Span<HloInstruction* const> start_indices,
absl::Span<const int64_t> slice_sizes,
const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
HloInstruction* operand, HloInstruction* start_indices,
absl::Span<const int64_t> slice_sizes,
const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
HloInstruction* operand, HloInstruction* update,
HloInstruction* start_indices, const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
HloInstruction* operand, HloInstruction* update,
absl::Span<HloInstruction* const> start_indices,
const OpMetadata* metadata = nullptr);
HloInstruction* MakeBroadcastHlo(
HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
absl::Span<const int64_t> result_shape_bounds,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
HloInstruction* MakeBroadcastHlo(
HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
const Shape& shape, const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeGetTupleElementHlo(
HloInstruction* operand, int64_t index,
const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeConcatHlo(
absl::Span<HloInstruction* const> operands, int64_t dimension,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
HloInstruction* MakeConvertToHlo(HloInstruction* hlo, PrimitiveType type,
const OpMetadata* metadata = nullptr);
HloInstruction* MakeBitcastHlo(HloInstruction* hlo, const Shape& shape,
const OpMetadata* metadata = nullptr);
HloInstruction* MakeBitcastConvertToHlo(HloInstruction* hlo, PrimitiveType type,
const OpMetadata* metadata = nullptr);
HloInstruction* MakeIotaHlo(HloComputation* computation, const Shape& shape,
int64_t iota_dimension);
absl::StatusOr<HloInstruction*> MakeDotHlo(
HloInstruction* lhs, HloInstruction* rhs,
const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config,
std::optional<PrimitiveType> preferred_element_type,
std::vector<SparsityDescriptor> sparsity = {},
absl::Span<HloInstruction* const> sparse_meta = {},
const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeMapHlo(
absl::Span<HloInstruction* const> operands, HloComputation* map_computation,
const OpMetadata* metadata = nullptr);
HloInstruction* MakeReducePrecisionHlo(HloInstruction* operand,
int exponent_bits, int mantissa_bits,
const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeReduceWindowHlo(
HloInstruction* operand, HloInstruction* init_value, const Window& window,
HloComputation* reduce_computation, const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeReduceWindowHlo(
HloInstruction* operand, HloInstruction* init_value, const Window& window,
HloOpcode binary_opcode, const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeReduceHlo(
HloInstruction* operand, HloInstruction* init_value,
absl::Span<const int64_t> dimensions, HloOpcode binary_opcode,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeReduceHlo(
HloInstruction* operand, HloInstruction* init_value,
absl::Span<const int64_t> dimensions, HloComputation* reduce_computation,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeReduceHlo(
HloInstruction* operand, HloInstruction* init_value,
HloOpcode binary_opcode, HloModule* module,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeReduceHlo(
absl::Span<HloInstruction* const> operands,
absl::Span<HloInstruction* const> init_values,
absl::Span<const int64_t> dimensions, HloComputation* reduce_computation,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
absl::StatusOr<HloInstruction*> MakeReverseHlo(
HloInstruction* operand, absl::Span<const int64_t> dimensions,
const OpMetadata* metadata = nullptr);
absl::StatusOr<HloInstruction*> MakeSelectHlo(
HloInstruction* pred, HloInstruction* on_true, HloInstruction* on_false,
HloInstruction* derived_from = nullptr,
const OpMetadata* metadata = nullptr,
const FrontendAttributes* frontend_attributes = nullptr);
HloInstruction* MaybeMakeTuple(absl::Span<HloInstruction* const> operands);
absl::StatusOr<HloInstruction*> MakeSortHlo(
const Shape& sort_shape, absl::Span<HloInstruction* const> operands,
int64_t dimension_to_sort, bool is_stable, HloComputation::Builder* builder,
HloModule* module, const OpMetadata* metadata = nullptr);
template <typename NativeT>
absl::StatusOr<HloInstruction*> MakeR1ConstantHlo(
HloComputation* computation, PrimitiveType type,
absl::Span<const NativeT> values) {
Literal literal = LiteralUtil::CreateR1<NativeT>(values);
if (literal.shape().element_type() != type) {
TF_ASSIGN_OR_RETURN(literal, literal.Convert(type));
}
return computation->AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
}
template <class NativeT>
HloInstruction* MakeR0ConstantHlo(HloComputation* computation, NativeT value) {
return computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<NativeT>(value)));
}
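// Creates a constant holding `value` converted to the element type of `base`,
// broadcast to a static shape matching `base` when `base` is not a scalar.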
template <class NativeT>
HloInstruction* MakeScalarLike(HloInstruction* base, NativeT value) {
auto scalar = base->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<NativeT>(value)
.Convert(base->shape().element_type())
.value()));
if (base->shape().rank() == 0) {
*scalar->mutable_shape() = base->shape();
return scalar;
}
return base->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(base->shape()), scalar, {}));
}
absl::StatusOr<HloInstruction*> MakeFusionInstruction(
HloInstruction* fused, HloInstruction::FusionKind kind);
absl::StatusOr<HloInstruction*> CollapseFirstNDims(HloInstruction* operand,
int64_t n);
absl::StatusOr<HloInstruction*> PrependDegenerateDims(HloInstruction* operand,
int64_t n);
absl::StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
HloInstruction* operand, absl::Span<const int64_t> expanded_dims);
absl::StatusOr<HloInstruction*> ElideDegenerateDims(
HloInstruction* operand, absl::Span<const int64_t> dims_to_elide);
absl::StatusOr<HloInstruction*> InsertDegenerateDims(
HloInstruction* operand, absl::Span<const int64_t> dims_to_insert);
absl::StatusOr<HloInstruction*> PadVectorWithZeros(HloInstruction* operand,
int64_t zeros_to_prepend,
int64_t zeros_to_append);
HloInstruction* BroadcastZeros(HloComputation* computation,
PrimitiveType element_type,
absl::Span<const int64_t> broadcast_dimensions);
HloInstruction* BroadcastZeros(HloComputation* computation,
const Shape& broadcast_shape);
HloInstruction* BroadcastOnes(HloComputation* computation,
PrimitiveType element_type,
absl::Span<const int64_t> broadcast_dimensions);
absl::StatusOr<std::unique_ptr<HloComputation>> CreateComputationWithSignature(
absl::Span<const Shape* const> domain, const Shape& range,
absl::string_view name);
HloInstruction* ExpandDegenerateReshape(HloInstruction* inst);
std::unique_ptr<HloInstruction> MakeConstantWithShape(const Shape& shape,
int64_t value);
}
#endif
#include "xla/service/hlo_creation_utils.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/client/lib/comparators.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
absl::StatusOr<HloInstruction*> MakeUnaryHlo(HloOpcode opcode,
HloInstruction* operand,
const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(Shape unary_op_shape,
ShapeInference::InferUnaryOpShape(opcode, operand));
return computation->AddInstruction(
HloInstruction::CreateUnary(unary_op_shape, opcode, operand), metadata);
}
HloInstruction* MakeCopyHlo(HloInstruction* from, const Shape& to) {
return from->AddInstruction(
HloInstruction::CreateUnary(to, HloOpcode::kCopy, from));
}
absl::StatusOr<HloInstruction*> MakeBinaryHlo(
HloOpcode opcode, HloInstruction* lhs, HloInstruction* rhs,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(Shape binary_op_shape,
ShapeInference::InferBinaryOpShape(opcode, lhs, rhs));
return computation->AddInstruction(
HloInstruction::CreateBinary(binary_op_shape, opcode, lhs, rhs), metadata,
frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeCompareHlo(
ComparisonDirection direction, HloInstruction* lhs, HloInstruction* rhs,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(
Shape binary_op_shape,
ShapeInference::InferBinaryOpShape(HloOpcode::kCompare, lhs, rhs));
return computation->AddInstruction(
HloInstruction::CreateCompare(binary_op_shape, lhs, rhs, direction),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakePadHlo(
HloInstruction* operand, HloInstruction* padding_value,
const PaddingConfig& padding_config, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, padding_value->parent());
TF_ASSIGN_OR_RETURN(
Shape pad_shape,
ShapeInference::InferPadShape(operand->shape(), padding_value->shape(),
padding_config));
return computation->AddInstruction(
HloInstruction::CreatePad(pad_shape, operand, padding_value,
padding_config),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeSliceHlo(
HloInstruction* operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices, absl::Span<const int64_t> strides,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(Shape slice_shape, ShapeInference::InferSliceShape(
operand->shape(), start_indices,
limit_indices, strides));
return computation->AddInstruction(
HloInstruction::CreateSlice(slice_shape, operand, start_indices,
limit_indices, strides),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeConvolveHlo(
HloInstruction* lhs, HloInstruction* rhs, int64_t feature_group_count,
int64_t batch_group_count, const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config,
std::optional<PrimitiveType> preferred_element_type,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(
Shape convolve_shape,
ShapeInference::InferConvolveShape(
lhs->shape(), rhs->shape(), feature_group_count, batch_group_count,
window, dimension_numbers, preferred_element_type));
return computation->AddInstruction(
HloInstruction::CreateConvolve(
convolve_shape, lhs, rhs, feature_group_count, batch_group_count,
window, dimension_numbers, precision_config),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeTransposeHlo(
HloInstruction* operand, absl::Span<const int64_t> dimensions) {
TF_ASSIGN_OR_RETURN(
Shape transpose_shape,
ShapeInference::InferTransposeShape(operand->shape(), dimensions));
return operand->AddInstruction(
HloInstruction::CreateTranspose(transpose_shape, operand, dimensions));
}
absl::StatusOr<HloInstruction*> MakeReshapeHlo(const Shape& result_shape,
HloInstruction* operand) {
return operand->AddInstruction(
HloInstruction::CreateReshape(result_shape, operand));
}
absl::StatusOr<HloInstruction*> MakeReshapeHlo(
absl::Span<const int64_t> result_shape_dim_bounds,
HloInstruction* operand) {
Shape new_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
result_shape_dim_bounds);
return MakeReshapeHlo(new_shape, operand);
}
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
HloInstruction* operand, absl::Span<HloInstruction* const> start_indices,
absl::Span<const int64_t> slice_sizes, const OpMetadata* metadata) {
if (start_indices.empty() || slice_sizes.empty()) {
return operand;
}
HloComputation* computation = operand->parent();
std::vector<Shape> scalar_start_indices_shapes(
start_indices.size(),
ShapeUtil::MakeShape(start_indices[0]->shape().element_type(), {}));
TF_ASSIGN_OR_RETURN(
Shape dynamic_slice_shape,
ShapeInference::InferDynamicSliceShape(
operand->shape(), scalar_start_indices_shapes, slice_sizes));
return computation->AddInstruction(
HloInstruction::CreateDynamicSlice(dynamic_slice_shape, operand,
start_indices, slice_sizes),
metadata);
}
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
HloInstruction* operand, HloInstruction* start_indices,
absl::Span<const int64_t> slice_sizes, const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, start_indices->parent());
int64_t rank = start_indices->shape().dimensions(0);
std::vector<HloInstruction*> scalar_start_indices;
for (int i = 0; i < rank; ++i) {
auto slice = computation->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {1}),
start_indices, {i}, {i + 1}, {1}));
scalar_start_indices.push_back(
computation->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {}),
slice)));
}
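  // `scalar_start_indices` now holds one scalar start index per dimension,
  // the form that CreateDynamicSlice expects.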
std::vector<Shape> scalar_start_indices_shapes(
rank, ShapeUtil::MakeShape(start_indices->shape().element_type(), {}));
TF_ASSIGN_OR_RETURN(
Shape dynamic_slice_shape,
ShapeInference::InferDynamicSliceShape(
operand->shape(), scalar_start_indices_shapes, slice_sizes));
return computation->AddInstruction(
HloInstruction::CreateDynamicSlice(dynamic_slice_shape, operand,
scalar_start_indices, slice_sizes),
metadata);
}
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
HloInstruction* operand, HloInstruction* update,
HloInstruction* start_indices, const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, update->parent());
CHECK_EQ(computation, start_indices->parent());
int64_t rank = start_indices->shape().dimensions(0);
std::vector<HloInstruction*> scalar_start_indices;
for (int i = 0; i < rank; ++i) {
auto slice = computation->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {1}),
start_indices, {i}, {i + 1}, {1}));
scalar_start_indices.push_back(
computation->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {}),
slice)));
}
std::vector<Shape> scalar_start_indices_shapes(
rank, ShapeUtil::MakeShape(start_indices->shape().element_type(), {}));
TF_ASSIGN_OR_RETURN(
Shape dynamic_update_slice_shape,
ShapeInference::InferDynamicUpdateSliceShape(
operand->shape(), update->shape(), scalar_start_indices_shapes));
return computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(
dynamic_update_slice_shape, operand, update, scalar_start_indices),
metadata);
}
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
HloInstruction* operand, HloInstruction* update,
absl::Span<HloInstruction* const> start_indices,
const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, update->parent());
std::vector<Shape> scalar_start_indices_shapes;
scalar_start_indices_shapes.reserve(start_indices.size());
for (auto start_index : start_indices) {
scalar_start_indices_shapes.push_back(start_index->shape());
}
TF_ASSIGN_OR_RETURN(
Shape dynamic_update_slice_shape,
ShapeInference::InferDynamicUpdateSliceShape(
operand->shape(), update->shape(), scalar_start_indices_shapes));
return computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(dynamic_update_slice_shape,
operand, update, start_indices),
metadata);
}
HloInstruction* MakeBroadcastHlo(
HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
absl::Span<const int64_t> result_shape_bounds, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
Shape broadcast_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
result_shape_bounds);
return MakeBroadcastHlo(operand, broadcast_dimensions, broadcast_shape,
metadata, frontend_attributes);
}
HloInstruction* MakeBroadcastHlo(
HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
const Shape& shape, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
HloComputation* computation = operand->parent();
return computation->AddInstruction(
HloInstruction::CreateBroadcast(shape, operand, broadcast_dimensions),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeGetTupleElementHlo(
HloInstruction* operand, int64_t index, const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(
Shape gte_shape,
ShapeInference::InferGetTupleElementShape(operand->shape(), index));
return computation->AddInstruction(
HloInstruction::CreateGetTupleElement(gte_shape, operand, index),
metadata);
}
absl::StatusOr<HloInstruction*> MakeConcatHlo(
absl::Span<HloInstruction* const> operands, int64_t dimension,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
CHECK_GT(operands.size(), 0);
HloComputation* computation = operands[0]->pa | #include "xla/service/hlo_creation_utils.h"
#include <memory>
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = match;
class HloCreationUtilsTest : public HloTestBase {
protected:
std::unique_ptr<VerifiedHloModule> CreateModuleWithProgramShape(
PrimitiveType primitive_type, absl::Span<const int64_t> input_shape_dims,
absl::Span<const int64_t> output_shape_dims, HloInstruction** param,
HloComputation** entry_computation) {
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_shape_dims);
Shape output_shape =
ShapeUtil::MakeShape(primitive_type, output_shape_dims);
auto module = CreateNewVerifiedModule("test");
*entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape}, output_shape, "entry")
.value());
*param = (*entry_computation)->parameter_instruction(0);
return module;
}
std::unique_ptr<VerifiedHloModule> CreateModuleWithProgramShape(
PrimitiveType primitive_type, absl::Span<const int64_t> input_shape_dims,
absl::Span<const int64_t> output_shape_dims, HloInstruction** param,
HloComputation** entry_computation, PrimitiveType primitive_type_output) {
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_shape_dims);
Shape output_shape =
ShapeUtil::MakeShape(primitive_type_output, output_shape_dims);
auto module = CreateNewVerifiedModule("test");
*entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape}, output_shape, "entry")
.value());
*param = (*entry_computation)->parameter_instruction(0);
return module;
}
};
TEST_F(HloCreationUtilsTest, CollapseFirst1Dim) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{2}, &param,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_1_dims_collapsed,
CollapseFirstNDims(param, 1));
entry_computation->set_root_instruction(first_1_dims_collapsed);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({3, 4})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR1<int32_t>({3, 4}));
}
TEST_F(HloCreationUtilsTest, CollapseFirst2Dims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(
S32, {2, 3, 2}, {6, 2}, &param,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_2_dims_collapsed,
CollapseFirstNDims(param, 2));
entry_computation->set_root_instruction(first_2_dims_collapsed);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR3<int32_t>(
{{{1, 2}, {3, 4}, {5, 6}},
{{-1, -2}, {-3, -4}, {-5, -6}}})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<int32_t>(
{{1, 2}, {3, 4}, {5, 6}, {-1, -2}, {-3, -4}, {-5, -6}}));
}
TEST_F(HloCreationUtilsTest, Prepend1DegenerateDim) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{1, 2},
&param, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_1_degenerate_dim_prepended,
PrependDegenerateDims(param, 1));
entry_computation->set_root_instruction(with_1_degenerate_dim_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({9, 10})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{9, 10}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{1, 1, 2},
&param, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_2_degenerate_dims_prepended,
PrependDegenerateDims(param, 2));
entry_computation->set_root_instruction(with_2_degenerate_dims_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({9, 10})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR3<int32_t>({{{9, 10}}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDimsToScalar) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{1, 1},
&param, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_2_degenerate_dims_prepended,
PrependDegenerateDims(param, 2));
entry_computation->set_root_instruction(with_2_degenerate_dims_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(9)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{9}}));
}
TEST_F(HloCreationUtilsTest, ExpandFirstDimInto3Dims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {6},
{3, 1, 2},
&param, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_dim_expanded,
ExpandFirstDimIntoNDims(param, {3, 1, 2}));
entry_computation->set_root_instruction(first_dim_expanded);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR3<int32_t>({{{1, 2}}, {{3, 4}}, {{5, 6}}}));
}
TEST_F(HloCreationUtilsTest, PadVectorWithZeros) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{6}, &param,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * zero_padded_param,
PadVectorWithZeros(param, 3, 1));
entry_computation->set_root_instruction(zero_padded_param);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({3, 4})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR1<int32_t>({0, 0, 0, 3, 4, 0}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_S32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
&param, &entry_computation);
HloInstruction* zeros =
BroadcastZeros(module->entry_computation(), S32, {2, 2});
entry_computation->set_root_instruction(zeros);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_F32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {},
{2, 2},
&param, &entry_computation);
HloInstruction* zeros =
BroadcastZeros(module->entry_computation(), F32, {2, 2});
entry_computation->set_root_instruction(zeros);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<float>(0.0f)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBitcastConvertToHlo_S32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2, 2},
{2, 2},
&param, &entry_computation, F32);
auto* input = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})));
HloInstruction* output = MakeBitcastConvertToHlo(input, F32);
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeIotaHlo_I32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
&param, &entry_computation, F32);
HloInstruction* output = MakeIotaHlo(module->entry_computation(),
ShapeUtil::MakeShape(F32, {2, 2}), 0);
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0.0)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {1.0f, 1.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBroadcast_F32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {},
{2, 2},
&param, &entry_computation);
auto* input = MakeR0ConstantHlo<float>(module->entry_computation(), 0);
HloInstruction* output = MakeBroadcastHlo(input, {}, {2, 2});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<float>(0.0f)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBroadcast_Shape_I32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
&param, &entry_computation);
auto* input = MakeR0ConstantHlo<int32_t>(module->entry_computation(), 0);
HloInstruction* output =
MakeBroadcastHlo(input, {}, ShapeUtil::MakeShape(S32, {2, 2}));
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0.0)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleCrashesWithEmptyOperands) {
EXPECT_DEATH(MaybeMakeTuple({}), "");
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleForwardsSingleElement) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2, 2},
{2, 2},
&param, &entry_computation);
HloInstruction* output = MaybeMakeTuple({param});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})}));
EXPECT_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleTuplizesMultipleOperands) {
Shape input_shape0 = ShapeUtil::MakeShape(S32, {2});
Shape input_shape1 = ShapeUtil::MakeShape(F32, {3, 3});
Shape output_shape =
ShapeUtil::MakeTupleShapeWithPtrs({&input_shape1, &input_shape0});
auto module = CreateNewVerifiedModule("test");
HloComputation* entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape0, &input_shape1},
output_shape, "entry")
.value());
HloInstruction* output =
MaybeMakeTuple({entry_computation->parameter_instruction(1),
entry_computation->parameter_instruction(0)});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
Literal input0 = LiteralUtil::CreateR1<int32_t>({{2, 4}});
Literal input1 =
LiteralUtil::CreateR2<float>({{3, 2, 1}, {4, 5, 6}, {9, 8, 7}});
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {input0.Clone(), input1.Clone()}));
Literal expected_result = LiteralUtil::MakeTuple({&input1, &input0});
EXPECT_EQ(result_literal, expected_result);
}
TEST_F(HloCreationUtilsTest, DynamicUpdateSliceVectorStartIndices) {
auto module = CreateNewVerifiedModule("dus-creation-test");
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
Shape input_shape = ShapeUtil::MakeShape(F64, {2, 3});
Shape update_shape = ShapeUtil::MakeShape(F64, {2, 2});
HloComputation* entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape, &update_shape}, input_shape,
"entry")
.value());
auto zero = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto one = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto update = LiteralUtil::CreateR2<double>({{-2.0, -3.0}, {-6.0, -7.0}});
HloInstruction* dus =
MakeDynamicUpdateSliceHlo(entry_computation->parameter_instruction(0),
entry_computation->parameter_instruction(1),
{zero, one})
.value();
entry_computation->set_root_instruction(dus);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result, evaluator.Evaluate(*module, {&operand_literal, &update}));
auto expected = LiteralUtil::CreateR2<double>({
{1, -2, -3},
{5, -6, -7},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloCreationUtilsTest, ExpandDegenerateReshape) {
const char* hlo_string = R"(
HloModule module
ENTRY test {
param = f32[12,1,10,32,8] parameter(0)
ROOT reshape = f32[1,12,10,1,32,1,8] reshape(param)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto expanded =
ExpandDegenerateReshape(module->entry_computation()->root_instruction());
EXPECT_THAT(expanded, GmockMatch(m::Reshape(m::Reshape(
m::Reshape(m::Reshape(m::Parameter(0)))))));
}
TEST_F(HloCreationUtilsTest, ReduceWindow) {
const Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
HloComputation* addition = [&] {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module->AddEmbeddedComputation(embedded_builder.Build());
}();
auto builder = HloComputation::Builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
Shape expected_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * reduce_window,
MakeReduceWindowHlo(a_param, init, window, addition));
module->entry_computation()->set_root_instruction(
reduce_window,
true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
EXPECT_EQ(module->entry_computation()->root_instruction()->shape(),
expected_output_shape);
}
TEST_F(HloCreationUtilsTest, ReduceWindowBinaryOpcode) {
const Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
Shape expected_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * reduce_window,
MakeReduceWindowHlo(a_param, init, window, HloOpcode::kAdd));
module->entry_computation()->set_root_instruction(
reduce_window,
true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
EXPECT_EQ(module->entry_computation()->root_instruction()->shape(),
expected_output_shape);
}
TEST_F(HloCreationUtilsTest, DynamicBroadcastShape) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {10},
{10}, &param,
&entry_computation);
param->mutable_shape()->set_dynamic_dimension(0, true);
HloInstruction* one_constant = MakeScalarLike(param, 1.0f);
EXPECT_TRUE(one_constant->shape().is_static());
}
}
} |
1,870 | cpp | tensorflow/tensorflow | hlo_memory_scheduler | third_party/xla/xla/service/hlo_memory_scheduler.cc | third_party/xla/xla/service/hlo_memory_scheduler_test.cc | #ifndef XLA_SERVICE_HLO_MEMORY_SCHEDULER_H_
#define XLA_SERVICE_HLO_MEMORY_SCHEDULER_H_
#include <cstdint>
#include <functional>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/tuple_points_to_analysis.h"
namespace xla {
using MemorySchedulerPostprocessor =
std::function<HloInstructionSequence(const HloInstructionSequence&)>;
using MemorySchedulerAlgorithm =
std::function<absl::StatusOr<HloInstructionSequence>(
HloComputation*, const TuplePointsToAnalysis&, const HloAliasAnalysis&,
const LogicalBuffer::SizeFunction&,
const absl::flat_hash_map<const HloComputation*, int64_t>&,
const MemorySchedulerPostprocessor&,
int64_t*)>;
using ModuleSchedulerAlgorithm = std::function<absl::StatusOr<HloSchedule>(
const HloModule*, const TuplePointsToAnalysis&, const HloAliasAnalysis&,
const LogicalBuffer::SizeFunction&,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t*)>;
ModuleSchedulerAlgorithm ComputationSchedulerToModuleScheduler(
const MemorySchedulerAlgorithm&, const MemorySchedulerPostprocessor& = {});
absl::StatusOr<HloInstructionSequence> ListMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory);
absl::StatusOr<HloInstructionSequence> DFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory);
absl::StatusOr<HloInstructionSequence> BFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory);
absl::StatusOr<HloInstructionSequence> PostOrderMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory);
absl::StatusOr<HloInstructionSequence> DefaultMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory);
absl::StatusOr<HloSchedule> DefaultModuleScheduler(
const HloModule* module, const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t* peak_memory);
absl::StatusOr<HloSchedule> ScheduleModule(
const HloModule* module, const LogicalBuffer::SizeFunction& size_function,
const ModuleSchedulerAlgorithm& algorithm = {},
const absl::flat_hash_set<absl::string_view>& execution_threads = {},
int64_t* peak_memory = nullptr);
absl::StatusOr<HloInstructionSequence> ScheduleComputation(
HloComputation* computation,
const LogicalBuffer::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor);
class HloMemoryScheduler : public HloModulePass {
public:
explicit HloMemoryScheduler(const LogicalBuffer::SizeFunction& size_function,
const ModuleSchedulerAlgorithm& algorithm = {});
~HloMemoryScheduler() override = default;
absl::string_view name() const override { return "hlo-memory-scheduler"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
LogicalBuffer::SizeFunction size_function_;
ModuleSchedulerAlgorithm algorithm_;
};
class HloTrivialScheduler : public HloModulePass {
public:
absl::string_view name() const override { return "hlo-trivial-scheduler"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
class HloDescheduler : public HloModulePass {
public:
HloDescheduler() = default;
~HloDescheduler() override = default;
absl::string_view name() const override { return "hlo-descheduler"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/hlo_memory_scheduler.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace {
using ::tsl::strings::HumanReadableNumBytes;
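// Greedy list scheduler: keeps a ready queue keyed by (bytes freed if
// scheduled, user count) and repeatedly emits the highest-priority ready
// instruction, updating unscheduled buffer use counts as it goes.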
class ListScheduler {
public:
static absl::StatusOr<HloInstructionSequence> Run(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation) {
ListScheduler scheduler(computation, points_to_analysis, size_function,
memory_by_computation);
return scheduler.CreateSchedule();
}
static bool IgnoreInstruction(const HloInstruction& instruction) {
return instruction.opcode() == HloOpcode::kParameter ||
instruction.opcode() == HloOpcode::kConstant;
}
private:
using Priority = std::pair<int64_t, int64_t>;
ListScheduler(HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation)
: computation_(computation),
points_to_analysis_(points_to_analysis),
size_function_(size_function),
memory_by_computation_(memory_by_computation) {
for (auto* instruction : computation->instructions()) {
absl::flat_hash_set<const LogicalBuffer*> instr_uses;
for (auto* operand : instruction->operands()) {
points_to_analysis.GetPointsToSet(operand).ForEachElement(
[&](const ShapeIndex& ,
const PointsToSet::BufferList& buffers) {
instr_uses.insert(buffers.begin(), buffers.end());
});
}
buffer_uses_[instruction] = std::vector<const LogicalBuffer*>(
instr_uses.begin(), instr_uses.end());
}
unscheduled_use_count_.reserve(points_to_analysis.num_logical_buffers());
for (auto* instruction : computation->instructions()) {
for (auto* buffer :
points_to_analysis.GetBuffersDefinedByInstruction(instruction)) {
unscheduled_use_count_[buffer] = 0;
}
}
for (auto* instruction : computation->instructions()) {
for (const LogicalBuffer* buffer : buffer_uses_.at(instruction)) {
++unscheduled_use_count_[buffer];
}
}
for (const LogicalBuffer* live_out_buffer :
points_to_analysis.GetPointsToSet(computation->root_instruction())
.CreateFlattenedSet()) {
++unscheduled_use_count_[live_out_buffer];
}
}
static bool IgnoreBuffer(const LogicalBuffer& buffer) {
return IgnoreInstruction(*buffer.instruction());
}
struct ReadyListEntry {
HloInstruction* instruction;
int64_t bytes_defined;
std::vector<const std::pair<const LogicalBuffer* const, int64_t>*>
used_buffer_unscheduled_use_counts;
};
ReadyListEntry MakeReadyListEntry(HloInstruction* instruction) {
ReadyListEntry entry;
entry.instruction = instruction;
entry.bytes_defined = 0;
for (auto* buffer :
points_to_analysis_.GetBuffersDefinedByInstruction(instruction)) {
if (!IgnoreBuffer(*buffer)) {
entry.bytes_defined += size_function_(*buffer);
}
}
for (auto* buffer : buffer_uses_.at(instruction)) {
if (IgnoreBuffer(*buffer)) {
continue;
}
auto unscheduled_use_count_it = unscheduled_use_count_.find(buffer);
CHECK(unscheduled_use_count_it != unscheduled_use_count_.end());
entry.used_buffer_unscheduled_use_counts.push_back(
&*unscheduled_use_count_it);
}
return entry;
}
int64_t BytesFreedIfScheduled(const ReadyListEntry& entry) {
auto instruction = entry.instruction;
auto opcode = instruction->opcode();
if (opcode == HloOpcode::kOutfeed &&
!instruction->outfeed_config().empty()) {
return INT_MAX;
}
if (opcode == HloOpcode::kInfeed && !instruction->infeed_config().empty()) {
return INT_MIN;
}
int64_t freed_bytes = 0;
for (const auto& kv : entry.used_buffer_unscheduled_use_counts) {
auto buffer = kv->first;
auto use_count = kv->second;
if (use_count == 1) {
freed_bytes += size_function_(*buffer);
}
}
int64_t max_subcomputation_bytes = 0;
for (const auto* c : instruction->called_computations()) {
auto it = memory_by_computation_.find(c);
if (it != memory_by_computation_.end()) {
int64_t subcomputation_bytes = it->second;
if (subcomputation_bytes > max_subcomputation_bytes) {
max_subcomputation_bytes = subcomputation_bytes;
}
}
}
int64_t bytes_defined;
if (max_subcomputation_bytes > 0 &&
(opcode == HloOpcode::kWhile || opcode == HloOpcode::kCall ||
opcode == HloOpcode::kConditional)) {
bytes_defined = max_subcomputation_bytes;
} else {
bytes_defined = entry.bytes_defined + max_subcomputation_bytes;
}
return freed_bytes - bytes_defined;
}
Priority GetPriority(const ReadyListEntry& entry) {
if (ShapeUtil::IsEffectiveScalar(entry.instruction->shape())) {
return {std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::max()};
}
return {BytesFreedIfScheduled(entry), entry.instruction->user_count()};
}
HloInstructionSequence CreateSchedule() {
HloInstructionSequence schedule;
absl::flat_hash_map<const HloInstruction*, int64_t> unscheduled_pred_count;
for (auto* instruction : computation_->instructions()) {
for (HloInstruction* user : instruction->users()) {
unscheduled_pred_count[user]++;
}
for (HloInstruction* succ : instruction->control_successors()) {
unscheduled_pred_count[succ]++;
}
}
std::multimap<Priority, ReadyListEntry> ready_queue;
absl::flat_hash_map<const HloInstruction*,
std::multimap<Priority, ReadyListEntry>::iterator>
ready_instructions;
auto add_to_ready_queue = [&](HloInstruction* inst) {
auto entry = MakeReadyListEntry(inst);
auto it = ready_queue.emplace(GetPriority(entry), std::move(entry));
ready_instructions[inst] = it;
};
for (auto* instruction : computation_->instructions()) {
if (instruction->operands().empty() &&
instruction->control_predecessors().empty()) {
add_to_ready_queue(instruction);
}
}
while (!ready_queue.empty()) {
auto best_it = ready_queue.end();
--best_it;
HloInstruction* best = best_it->second.instruction;
VLOG(2) << "Schedule instruction: " << best->ToShortString()
<< " Bytes freed: " << best_it->first.first;
ready_queue.erase(best_it);
ready_instructions.erase(best);
schedule.push_back(best);
scheduled_instructions_.insert(best);
bool adjust_ready_queue = false;
for (const LogicalBuffer* buffer : buffer_uses_.at(best)) {
int64_t& count = unscheduled_use_count_[buffer];
CHECK_GT(count, 0);
--count;
if (count == 1) {
adjust_ready_queue = true;
}
}
auto update_pred_count = [&](HloInstruction* inst) {
int64_t pred_count = --unscheduled_pred_count.at(inst);
CHECK_GE(pred_count, 0);
if (pred_count == 0) {
add_to_ready_queue(inst);
}
};
for (HloInstruction* user : best->users()) {
update_pred_count(user);
}
for (HloInstruction* succ : best->control_successors()) {
update_pred_count(succ);
}
if (adjust_ready_queue) {
for (HloInstruction* operand : best->operands()) {
for (HloInstruction* operand_user : operand->users()) {
auto ready_instructions_it = ready_instructions.find(operand_user);
if (ready_instructions_it == ready_instructions.end()) {
continue;
}
auto ready_queue_it = ready_instructions_it->second;
auto& entry = ready_queue_it->second;
Priority new_priority = GetPriority(entry);
if (new_priority == ready_queue_it->first) {
continue;
}
ready_instructions_it->second =
ready_queue.emplace(new_priority, std::move(entry));
ready_queue.erase(ready_queue_it);
}
}
}
}
CHECK_EQ(schedule.size(), computation_->instruction_count());
CHECK_EQ(scheduled_instructions_.size(), computation_->instruction_count());
return schedule;
}
HloComputation* computation_;
const TuplePointsToAnalysis& points_to_analysis_;
const BufferValue::SizeFunction& size_function_;
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation_;
absl::flat_hash_map<const HloInstruction*, std::vector<const LogicalBuffer*>>
buffer_uses_;
absl::flat_hash_map<const LogicalBuffer*, int64_t> unscheduled_use_count_;
absl::flat_hash_set<const HloInstruction*> scheduled_instructions_;
};
int64_t SumLogicalBufferSizes(
const TuplePointsToAnalysis::BufferDefinitionVector& buffers,
const BufferValue::SizeFunction& size_function) {
int64_t size = 0;
for (const LogicalBuffer* buffer : buffers) {
size += size_function(*buffer);
}
return size;
}
absl::StatusOr<HloInstructionSequence> ScheduleComputationHelper(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerAlgorithm& algorithm,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
VLOG(2) << "Computation: " << computation->name();
if (algorithm) {
return algorithm(computation, points_to_analysis, alias_analysis,
size_function, memory_by_computation, postprocessor,
peak_memory);
}
return DefaultMemoryScheduler(computation, points_to_analysis, alias_analysis,
size_function, memory_by_computation,
postprocessor, peak_memory);
}
}
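// DFS-based scheduler: gathers per-instruction (extra_users, total_sizes)
// statistics, then emits a DFS post-order that visits operands with larger
// statistics first, optionally reporting peak memory via the heap simulator.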
absl::StatusOr<HloInstructionSequence> DFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
int64_t cumulative_total_size = 0;
int64_t total_hlos = computation->instruction_count();
struct Stats {
int64_t extra_users = 0;
int64_t total_sizes = 0;
};
absl::flat_hash_map<const HloInstruction*, Stats> stats_map;
stats_map.reserve(computation->instruction_count());
for (const HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
auto& stats = stats_map[hlo];
if (ListScheduler::IgnoreInstruction(*hlo)) {
continue;
}
stats.extra_users = hlo->users().empty() ? 0 : hlo->users().size() - 1;
int64_t logical_buffer_size = SumLogicalBufferSizes(
points_to_analysis.GetBuffersDefinedByInstruction(hlo), size_function);
stats.total_sizes = logical_buffer_size;
cumulative_total_size += logical_buffer_size;
absl::flat_hash_set<const HloInstruction*> unique_operands(
hlo->operands().begin(), hlo->operands().end());
for (const HloInstruction* operand : unique_operands) {
auto& operand_stats = stats_map.at(operand);
stats.extra_users += operand_stats.extra_users;
stats.total_sizes += operand_stats.total_sizes;
}
stats.total_sizes = std::min(stats.total_sizes, cumulative_total_size);
stats.extra_users = std::min(stats.extra_users, total_hlos);
}
CHECK_EQ(stats_map.size(), computation->instruction_count());
HloInstructionSequence sequence;
FunctionVisitor visitor([&sequence](HloInstruction* hlo) {
sequence.push_back(hlo);
return absl::OkStatus();
});
visitor.ReserveVisitStates(computation->instruction_count());
TF_RETURN_IF_ERROR(computation->AcceptWithOperandOrder(
&visitor, [&stats_map](const HloInstruction* a, const HloInstruction* b) {
auto& stats_a = stats_map.at(a);
auto& stats_b = stats_map.at(b);
if (stats_a.extra_users != stats_b.extra_users) {
return stats_a.extra_users > stats_b.extra_users;
}
if (stats_a.total_sizes != stats_b.total_sizes) {
return stats_a.total_sizes > stats_b.total_sizes;
}
return a->name() < b->name();
}));
if (postprocessor) {
sequence = postprocessor(sequence);
}
CHECK_EQ(sequence.size(), computation->instruction_count());
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory, HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function,
&memory_by_computation));
}
return sequence;
}
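// BFS-based scheduler: tracks remaining (unique operand + control predecessor)
// dependency counts and emits instructions in FIFO order as their
// predecessors get scheduled.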
absl::StatusOr<HloInstructionSequence> BFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
absl::flat_hash_map<const HloInstruction*, int64_t> inst_index;
std::vector<int64_t> inst_deps(computation->instruction_count(), 0);
std::queue<HloInstruction*> ready_queue;
auto update_queue = [&](HloInstruction* inst) {
int64_t index = inst_index.at(inst);
CHECK_GE(--inst_deps[index], 0);
if (inst_deps[index] == 0) {
ready_queue.push(inst);
}
};
for (HloInstruction* inst : computation->instructions()) {
size_t index = inst_index.size();
inst_index[inst] = index;
inst_deps[index] =
inst->unique_operands().size() + inst->control_predecessors().size();
if (inst_deps[index] == 0) {
ready_queue.push(i | #include "xla/service/hlo_memory_scheduler.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloSchedulingTest : public HloTestBase {};
int64_t PeakMemoryUseOfEntryComputation(
HloModule* module, LogicalBuffer::SizeFunction size_function) {
CHECK(module->has_entry_computation());
CHECK(module->has_schedule());
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module).value();
const HloSchedule& schedule = module->schedule();
HloComputation* computation = module->entry_computation();
const HloInstructionSequence& sequence = schedule.sequence(computation);
return HeapSimulator::Run(
std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
*computation, sequence, *alias_analysis, size_function)
.value()
.heap_size;
}
TEST_F(HloSchedulingTest, LastUseScheduledFirst) {
const Shape vec = ShapeUtil::MakeShape(xla::F32, {42});
auto builder = HloComputation::Builder(TestName());
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, vec, "param"));
auto ab = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kAbs, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kExp, param));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(vec, HloOpcode::kAdd, ab, exp));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kNegate, exp));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(vec, HloOpcode::kSubtract, add, negate));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloMemoryScheduler scheduler([](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
});
ASSERT_FALSE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool changed, scheduler.Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
const std::vector<HloInstruction*>& sequence =
module->schedule().sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
EXPECT_EQ(param, sequence.front());
EXPECT_EQ(sub, sequence.back());
SequentialHloOrdering ordering(module->schedule());
EXPECT_TRUE(ordering.ExecutesBefore(add, negate));
HloDescheduler descheduler;
EXPECT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool descheduler_changed,
descheduler.Run(module.get()));
EXPECT_TRUE(descheduler_changed);
EXPECT_FALSE(module->has_schedule());
}
TEST_F(HloSchedulingTest, ListSchedulerHandlesAliasing) {
const char* module_str = R"(
HloModule test_aliasing_module
ENTRY root {
param = s32[1000] parameter(0)
p0 = s32[1000] copy(param)
p1 = s32[1000] copy(param)
t = (s32[1000], s32[1000]) tuple(p0, p1)
a = s32[1000] get-tuple-element(t), index=0
b = s32[1000] get-tuple-element(t), index=1
c = s32[1000] add(a, b)
d = s32[1000] add(c, b)
e = s32[1000] add(c, c)
f = s32[1000] add(e, e)
ROOT result = (s32[1000], s32[1000], s32[1000]) tuple(d, e, f)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
int64_t peak_memory;
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), size_fn,
ComputationSchedulerToModuleScheduler(ListMemoryScheduler),
{}, &peak_memory));
TF_ASSERT_OK(module->set_schedule(schedule));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
EXPECT_EQ(instructions_by_name.at("param"), sequence.front());
EXPECT_EQ(instructions_by_name.at("result"), sequence.back());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(instructions_by_name.at("d"),
instructions_by_name.at("e")));
EXPECT_EQ(PeakMemoryUseOfEntryComputation(module.get(), size_fn),
peak_memory);
}
TEST_F(HloSchedulingTest, HostSendDoneSchedule) {
const char* const module_str = R"(
HloModule module
ENTRY entry {
%p = f32[1000, 1000] parameter(0)
%token.0 = token[] after-all()
%send = (f32[1000, 1000], token[]) send(%p, %token.0),
channel_id=1, is_host_transfer=true
%n1 = f32[1000, 1000] negate(%p)
%n2 = f32[1000, 1000] negate(%n1)
%n3 = f32[1000, 1000] negate(%n2)
%send-done = token[] send-done(%send), channel_id=1, is_host_transfer=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
TF_ASSERT_OK_AND_ASSIGN(HloSchedule schedule,
ScheduleModule(module.get(), size_fn,
ComputationSchedulerToModuleScheduler(
ListMemoryScheduler)));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
EXPECT_LT(absl::c_find(sequence, instructions_by_name.at("send-done")),
absl::c_find(sequence, instructions_by_name.at("n1")));
}
TEST_F(HloSchedulingTest, TuplesAreAccountedCorrectly) {
auto builder = HloComputation::Builder(TestName());
const Shape r1f32 = ShapeUtil::MakeShape(xla::F32, {6});
auto lit = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1, 1})));
auto abs_const = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, lit));
auto abs_abs1 = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, abs_const));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple(
absl::Span<HloInstruction* const>({abs_abs1})));
auto tuple_elm = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r1f32, tuple, 0));
auto abs_abs2 = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, abs_const));
builder.AddInstruction(HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd,
tuple_elm, abs_abs2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 1);
},
ComputationSchedulerToModuleScheduler(ListMemoryScheduler)));
EXPECT_EQ(module->entry_computation()->instruction_count(),
schedule.sequence(module->entry_computation()).size());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(abs_abs2, tuple));
}
TEST_F(HloSchedulingTest, MultiOutputFusionAccountedCorrectly) {
const Shape r1f32 = ShapeUtil::MakeShape(xla::F32, {5});
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1})));
auto c2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4, 5})));
auto c3 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({0, 2, 4, 6, 8})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, c1, c2));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kMultiply, add, c3));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({add, mul}));
auto tuple_elm = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r1f32, tuple, 0));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kExp, c3));
builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, tuple_elm, exp));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{tuple, mul, add}, HloInstruction::FusionKind::kLoop);
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 2);
},
ComputationSchedulerToModuleScheduler(ListMemoryScheduler)));
EXPECT_EQ(module->entry_computation()->instruction_count(),
schedule.sequence(module->entry_computation()).size());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(exp, fusion));
}
TEST_F(HloSchedulingTest, TrivialScheduler) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_FALSE(module->has_schedule());
TF_ASSERT_OK(HloTrivialScheduler().Run(module.get()).status());
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
std::unique_ptr<HloModule> clone = module->Clone();
ASSERT_TRUE(clone->has_schedule());
TF_ASSERT_OK(clone->schedule().Verify());
}
TEST_F(HloSchedulingTest, BFSScheduler) {
const char* const hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY e {
p0 = f32[1,2,1,512,256] parameter(0)
c0 = f32[] constant(0)
c1 = f32[] constant(1)
bcast1 = f32[1,2,1,512,256] broadcast(c1), dimensions={}
add1 = f32[1,2,1,512,256] add(p0, bcast1)
c2 = f32[] constant(2)
bcast2 = f32[1,2,1,512,256] broadcast(c2), dimensions={}
add2 = f32[1,2,1,512,256] add(p0, bcast2)
c3 = f32[] constant(3)
bcast3 = f32[1,2,1,512,256] broadcast(c3), dimensions={}
add3 = f32[1,2,1,512,256] add(p0, bcast3)
c4 = f32[] constant(4)
bcast4 = f32[1,2,1,512,256] broadcast(c4), dimensions={}
add4 = f32[1,2,1,512,256] add(p0, bcast4)
c5 = f32[] constant(5)
bcast5 = f32[1,2,1,512,256] broadcast(c5), dimensions={}
add5 = f32[1,2,1,512,256] add(p0, bcast5)
r1 = f32[1,2] reduce(add1, c0), dimensions={2,3,4}, to_apply=add
r2 = f32[1,2] reduce(add2, c0), dimensions={2,3,4}, to_apply=add
r3 = f32[1,2] reduce(add3, c0), dimensions={2,3,4}, to_apply=add
r4 = f32[1,2] reduce(add4, c0), dimensions={2,3,4}, to_apply=add
r5 = f32[1,2] reduce(add5, c0), dimensions={2,3,4}, to_apply=add
out0 = f32[1,2] add(r1, r2)
out1 = f32[1,2] add(r3, r4)
out2 = f32[1,2] add(out0, out1)
ROOT out3 = f32[1,2] add(out2, r5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
},
ComputationSchedulerToModuleScheduler(BFSMemoryScheduler)));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
auto index = [&](std::string_view name) -> size_t {
const HloInstruction* instruction = instructions_by_name.at(name);
return std::distance(sequence.begin(), absl::c_find(sequence, instruction));
};
std::vector<size_t> indices = {
index("bcast1"), index("bcast2"), index("bcast3"), index("bcast4"),
index("bcast5"), index("add1"), index("add2"), index("add3"),
index("add4"), index("add5"), index("r1"), index("r2"),
index("r3"), index("r4"), index("r5"), index("out0"),
index("out1"), index("out2"), index("out3")};
EXPECT_TRUE(absl::c_is_sorted(indices));
}
}
} |
1,871 | cpp | tensorflow/tensorflow | custom_call_status | third_party/xla/xla/service/custom_call_status.cc | third_party/xla/xla/service/custom_call_status_test.cc | #ifndef XLA_SERVICE_CUSTOM_CALL_STATUS_H_
#define XLA_SERVICE_CUSTOM_CALL_STATUS_H_
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct XlaCustomCallStatus_ XlaCustomCallStatus;
void XlaCustomCallStatusSetSuccess(XlaCustomCallStatus* status);
void XlaCustomCallStatusSetFailure(XlaCustomCallStatus* status,
const char* message, size_t message_len);
#ifdef __cplusplus
}
#endif
#endif
#include "xla/service/custom_call_status_internal.h"
namespace xla {
std::optional<absl::string_view> CustomCallStatusGetMessage(
const XlaCustomCallStatus* status) {
return status->message;
}
}
void XlaCustomCallStatusSetSuccess(XlaCustomCallStatus* status) {
status->message = std::nullopt;
}
void XlaCustomCallStatusSetFailure(XlaCustomCallStatus* status,
const char* message, size_t message_len) {
status->message = std::string(message, 0, message_len);
} | #include "xla/service/custom_call_status_internal.h"
#include "xla/service/custom_call_status_test_c_caller.h"
#include "tsl/platform/test.h"
TEST(XlaCustomCallStatusTest, DefaultIsSuccess) {
XlaCustomCallStatus status;
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetSuccess) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetSuccessAfterFailure) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 5);
XlaCustomCallStatusSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetFailure) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, SetFailureAfterSuccess) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetSuccess(&status);
XlaCustomCallStatusSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, SetFailureTruncatesErrorAtGivenLength) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 4);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "erro");
}
TEST(XlaCustomCallStatusTest, SetFailureTruncatesErrorAtNullTerminator) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 100);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, CSetSuccess) {
XlaCustomCallStatus status;
CSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, CSetFailure) {
XlaCustomCallStatus status;
CSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
} |
1,872 | cpp | tensorflow/tensorflow | conditional_canonicalizer | third_party/xla/xla/service/conditional_canonicalizer.cc | third_party/xla/xla/service/conditional_canonicalizer_test.cc | #ifndef XLA_SERVICE_CONDITIONAL_CANONICALIZER_H_
#define XLA_SERVICE_CONDITIONAL_CANONICALIZER_H_
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class ConditionalCanonicalizer : public HloModulePass {
public:
absl::string_view name() const override {
return "conditional-canonicalizer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/conditional_canonicalizer.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
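// Rewrites a conditional with a non-tuple result: wraps each branch root in a
// one-element tuple, clones the conditional with the tuple shape, and replaces
// the original with a get-tuple-element of the clone.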
absl::Status CanonicalizeNonTupleConditional(HloInstruction* conditional) {
TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional);
for (auto* branch : conditional->called_computations()) {
HloInstruction* root = branch->root_instruction();
TF_RET_CHECK(!root->shape().IsTuple());
HloInstruction* tuple =
branch->AddInstruction(HloInstruction::CreateTuple({root}));
branch->set_root_instruction(tuple, true);
}
auto parent = conditional->parent();
const Shape& root_shape = conditional->shape();
auto new_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(&root_shape, 1));
auto new_conditional =
parent->AddInstruction(conditional->CloneWithNewShape(new_shape));
auto gte = parent->AddInstruction(
HloInstruction::CreateGetTupleElement(root_shape, new_conditional, 0));
TF_RETURN_IF_ERROR(parent->ReplaceInstruction(conditional, gte));
return absl::OkStatus();
}
}
absl::StatusOr<bool> ConditionalCanonicalizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "ConditionalCanonicalizer::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kConditional &&
!inst->shape().IsTuple()) {
TF_RETURN_IF_ERROR(CanonicalizeNonTupleConditional(inst));
changed = true;
}
}
}
XLA_VLOG_LINES(
2, "ConditionalCanonicalizer::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/conditional_canonicalizer.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ConditionalCanonicalizerTest : public HloTestBase {
protected:
ConditionalCanonicalizerTest() {}
};
TEST_F(ConditionalCanonicalizerTest, DenseArrayConditionalRewrite) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule _
true_branch {
true_param = (s32[3,2]) parameter(0)
ROOT root = s32[] constant(0)
}
false_branch {
false_param = (s32[3,2]) parameter(0)
ROOT root = s32[] constant(1)
}
ENTRY entry {
param0 = s32[3,2] parameter(0)
branch = pred[] constant(false)
param_tuple = (s32[3 ,2]) tuple(param0)
ROOT conditional = s32[] conditional(branch, param_tuple, param_tuple),
true_computation=true_branch, false_computation=false_branch
}
)")
.value();
ConditionalCanonicalizer pass;
EXPECT_TRUE(pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement(op::Conditional()));
}
}
} |
1,873 | cpp | tensorflow/tensorflow | space_to_batch_converter | third_party/xla/xla/service/space_to_batch_converter.cc | third_party/xla/xla/service/space_to_batch_converter_test.cc | #ifndef XLA_SERVICE_SPACE_TO_BATCH_CONVERTER_H_
#define XLA_SERVICE_SPACE_TO_BATCH_CONVERTER_H_
#include <stdbool.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/status_macros.h"
namespace xla {
struct SpaceToBatchController {
bool enable_propagations_on_base_dilations;
bool enable_propagations_on_window_dilations;
bool enable_propagations_on_trivial_window_dilations;
bool disable_starting_on_small_chains;
int64_t limit_on_batch_size;
int64_t dimension_from_end_to_convert = 1;
int64_t number_of_splits = 8;
int64_t count_of_dimensions_to_convert = 1;
};
enum class SpaceToBatchDimMap : uint8_t {
kBatch = 0,
kFeature = 1,
kSpace0 = 2,
};
inline constexpr int64_t NumMappedDims() { return 3; }
class SpaceToBatchConverter : public HloModulePass {
public:
explicit SpaceToBatchConverter(SpaceToBatchController ctrl) : ctrl_(ctrl) {}
absl::string_view name() const override { return "space-to-batch-converter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
SpaceToBatchController ctrl_;
};
}
#endif
#include "xla/service/space_to_batch_converter.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/algorithm.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/bitmap.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
namespace m = match;
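// Visitor that performs the space-to-batch rewrite on a convolution and then
// propagates the batch-expanded layout through supported downstream users.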
class ConvolutionVisitor {
public:
absl::Status PerformSpaceToBatchOnConvolution(HloInstruction* convolution);
struct ConvDetails {
std::vector<int64_t> spatial_dimensions_to_split;
int64_t inherent_low_padding, inherent_high_padding, stride, spatial_size,
base_dilation_factor, halo_size, high_padding_for_conv,
low_padding_for_conv, kernel_spatial_dim_size, input_dim_size;
};
ConvDetails GetConvolutionDetails(HloInstruction* convolution,
ConvolutionDimensionNumbers& dim_numbers);
std::pair<std::vector<int64_t>, std::vector<int64_t>> GetSpatialDimsToSplit(
HloInstruction* old_operand);
bool IsForwardWindowDilatedConv(HloInstruction* convolution,
ConvolutionDimensionNumbers& dim_numbers);
bool CanPropagate(HloInstruction* consumer, HloInstruction* producer);
bool IsBroadcastTree(HloInstruction* op, HloInstruction* consumer,
std::vector<HloInstruction*>& instructions_to_transform);
void RewriteBroadcastTree(
HloInstruction* producer,
std::vector<HloInstruction*>& instructions_to_transform);
void PropagateOnBroadcast(HloInstruction* consumer, HloInstruction* producer);
bool IsOpcodeNonPropagatable(HloInstruction* consumer);
bool SupportedOpForPropagation(HloInstruction* consumer,
HloInstruction* producer);
bool SupportedDotForPropagation(HloInstruction* consumer,
HloInstruction* producer);
bool IsBroadcastPropagatable(HloInstruction* broadcast,
HloInstruction* old_other_op);
absl::StatusOr<bool> Propagate(HloInstruction* consumer,
HloInstruction* producer);
absl::StatusOr<std::pair<HloInstruction*, std::vector<int64_t>>> SplitSpace(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits,
std::vector<int64_t>* spatial_dimensions_to_split,
bool is_backprop = false, bool is_rhs = false);
absl::StatusOr<HloInstruction*> PerformSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t spatial_split_size,
int64_t num_splits);
absl::StatusOr<HloInstruction*> TransposeAndMergeBatch(
HloInstruction* activations,
absl::Span<const int64_t> final_split_spatial_dim_positioning,
int64_t activations_batch_dim, int64_t old_batch_size);
absl::StatusOr<HloInstruction*> PadAndSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits);
absl::StatusOr<HloInstruction*> PropagateOnConstant(HloInstruction* consumer,
HloInstruction* producer);
absl::Status PropagateOnConv(HloInstruction* convolution);
absl::Status PropagateOnConcat(HloInstruction* concat);
absl::Status PropagateOnReverse(HloInstruction* reverse);
absl::Status PropagateOnPad(HloInstruction* pad);
absl::Status PropagateOnSlice(HloInstruction* slice);
absl::Status PropagateOnBackpropFilterConv(HloInstruction* convolution);
bool IsConvSuitableForSpaceToBatch(HloInstruction* convolution);
bool IsThisBackPropFilterConv(HloInstruction* convolution);
absl::Status PropagateOnUsers(HloInstruction* old_conv);
absl::StatusOr<HloInstruction*> SelectValidPortion(
HloInstruction* new_instr, HloInstruction* old_instr,
HloInstruction* select_val, int64_t new_batch_dim,
absl::Span<const int64_t> new_space_dims, int64_t old_batch_dim,
absl::Span<const int64_t> old_space_dims);
struct SpaceNextToBatchDetails {
HloInstruction* instr;
std::vector<int64_t> transpose_dims;
};
absl::StatusOr<SpaceNextToBatchDetails> BringSpaceNextToBatch(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim,
std::vector<int64_t>* spatial_dimensions_to_split,
bool is_backprop = false, bool is_rhs = false);
absl::StatusOr<HloInstruction*> ChangeSpatialSizeOnSpaceToBatchedShape(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t new_spatial_dim_size, bool increase_spatial_size = false);
absl::StatusOr<HloInstruction*> SplitAndTransposeMergedBatch(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions);
absl::StatusOr<HloInstruction*> BatchToSpace(HloInstruction* old_instr);
absl::StatusOr<HloInstruction*> HaloDuplicateWithSlice(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
HloInstruction* pad_val = nullptr);
absl::StatusOr<bool> Run();
const bool changed() const { return changed_; }
~ConvolutionVisitor() = default;
explicit ConvolutionVisitor(SpaceToBatchController ctrl,
HloComputation* computation);
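  // Returns the index of the first spatial dimension chosen for conversion,
  // counted from the end of the spatial-dimension list as configured by
  // ctrl_.dimension_from_end_to_convert and count_of_dimensions_to_convert.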
int64_t GetFirstChosenSpatialDim(HloInstruction* convolution) {
const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
const int64_t end_point = convolution->convolution_dimension_numbers()
.input_spatial_dimensions_size() -
ctrl_.dimension_from_end_to_convert;
return end_point - dim_count + 1;
}
std::vector<int64_t> GetChosenSpatialDims(HloInstruction* convolution) {
const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
std::vector<int64_t> dims(dim_count);
for (int i = 0; i < dim_count; ++i) {
dims[i] =
convolution->convolution_dimension_numbers().input_spatial_dimensions(
first_dim + i);
}
return dims;
}
int64_t DimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) {
return permute_dims[id];
}
int DimMapper(SpaceToBatchDimMap s) { return static_cast<int>(s); }
int64_t ReverseDimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) {
return std::distance(permute_dims.begin(), absl::c_find(permute_dims, id));
}
HloInstruction* DoesConvolutionFeedReduceWindowOrSelectAndScatter(
HloInstruction* instr, int64_t depth);
bool DoesConvolutionFeedUnpropagatableOp(
HloInstruction* instr, int64_t depth = kUnpropagatableOpSearchDepth);
bool IsSpaceToBatchedSpaceSizeSuitable(HloInstruction* instr);
private:
HloComputation* computation_;
absl::flat_hash_set<HloInstruction*> convs_to_visit_;
std::vector<HloInstruction*> conv_visitor_list_;
HloInstructionSet non_propagatable_instrs_;
absl::flat_hash_map<HloInstruction*, HloInstruction*> batch_to_space_map_;
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_instrs_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> instr_to_dim_map_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>>
instr_to_dim_permute_map_;
absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<HloInstruction*>>
broadcast_map_;
bool changed_ = false;
static constexpr int64_t kReduceWindowSearchDepth = 10;
static constexpr int64_t kUnpropagatableOpSearchDepth = 3;
static constexpr int64_t kMultiplierOnSpaceForBaseDilation = 3;
absl::flat_hash_map<std::pair<HloInstruction*, int64_t>, bool>
unpropagatability_cache_;
SpaceToBatchController ctrl_;
};
ConvolutionVisitor::ConvolutionVisitor(SpaceToBatchController ctrl,
HloComputation* computation) {
ctrl_ = ctrl;
computation_ = computation;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kConvolution) {
continue;
}
auto convolution = inst;
if (!IsConvSuitableForSpaceToBatch(convolution)) {
VLOG(1) << "Conv not suitable for space-to-batch "
<< convolution->ToString();
continue;
}
VLOG(1) << "Conv added to space-to-batch worklist "
<< convolution->ToString();
convs_to_visit_.insert(convolution);
conv_visitor_list_.push_back(convolution);
}
}
std::pair<std::vector<int64_t>, std::vector<int64_t>>
ConvolutionVisitor::GetSpatialDimsToSplit(HloInstruction* old_operand) {
auto new_operand = old_to_new_instrs_[old_operand];
auto dim_map_val = instr_to_dim_map_[old_operand];
auto permute_dims = instr_to_dim_permute_map_[new_operand];
std::vector<int64_t> old_dims(ctrl_.count_of_dimensions_to_convert),
new_dims(ctrl_.count_of_dimensions_to_convert);
old_dims[0] = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
new_dims[0] = DimLookUp(permute_dims, old_dims[0]);
for (int i = 1; i < ctrl_.count_of_dimensions_to_convert; ++i) {
old_dims[i] = old_dims[0] + i;
new_dims[i] = new_dims[0] + i;
}
return std::make_pair(old_dims, new_dims);
}
bool ConvolutionVisitor::IsForwardWindowDilatedConv(
HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {
const int64_t window_dilation_factor =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation();
if (window_dilation_factor == 1) {
return false;
}
const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
return convolution->operand(1)->shape().dimensions(kernel_spatial_dim) <
convolution->shape().dimensions(output_spatial_dim);
}
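// Returns true if the convolution's window dilation, base dilation, batch
// size, and halo size allow a legal and profitable space-to-batch rewrite.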
bool ConvolutionVisitor::IsConvSuitableForSpaceToBatch(
HloInstruction* convolution) {
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
if (GetFirstChosenSpatialDim(convolution) < 0) {
return false;
}
if (convolution->batch_group_count() != 1) {
return false;
}
if (convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation() != 1) {
if (!IsForwardWindowDilatedConv(convolution, dim_numbers)) {
return false;
}
}
const ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);
const int64_t low_pad = convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_low();
if (c.base_dilation_factor != 1) {
if (!ctrl_.enable_propagations_on_base_dilations) {
return false;
}
if (c.stride != 1) {
return false;
}
if (low_pad == 0) {
if (c.kernel_spatial_dim_size != 1) {
return false;
}
} else if (low_pad != c.base_dilation_factor - 1 &&
low_pad != c.base_dilation_factor) {
return false;
}
}
int64_t activations_batch_dim = dim_numbers.input_batch_dimension();
const int64_t old_batch_size =
convolution->operand(0)->shape().dimensions(activations_batch_dim);
if (old_batch_size > ctrl_.limit_on_batch_size) {
return false;
}
VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size;
if (c.halo_size > CeilOfRatio(c.spatial_size, ctrl_.number_of_splits)) {
return false;
}
if (c.base_dilation_factor > 1 &&
c.inherent_low_padding == c.base_dilation_factor) {
if (c.spatial_size <
kMultiplierOnSpaceForBaseDilation * ctrl_.number_of_splits) {
return false;
}
}
VLOG(1) << "Legal space-to-batch convolution " << convolution->ToString();
return true;
}
bool ConvolutionVisitor::IsThisBackPropFilterConv(HloInstruction* convolution) {
auto activations = convolution->mutable_operand(0);
auto kernel = convolution->mutable_operand(1);
auto dim_numbers = convolution->convolution_dimension_numbers();
if (!old_to_new_instrs_.contains(kernel) &&
!old_to_new_instrs_.contains(activations)) {
return false;
}
if (old_to_new_instrs_.contains(kernel)) {
auto dim_map_val_op_0 = instr_to_dim_map_[kernel];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
if (convolution->convolution_dimension_numbers()
.kernel_input_feature_dimension() != old_batch_dim) {
return false;
}
}
if (old_to_new_instrs_.contains(activations)) {
auto dim_map_val_op_0 = instr_to_dim_map_[activations];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
if (dim_numbers.input_feature_dimension() != old_batch_dim) {
return false;
}
}
return true;
}
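// Duplicates halo regions between adjacent spatial splits: the needed border
// rows are sliced from neighbouring splits (shifted across the split/batch
// dimension with a one-element pad) and concatenated onto each split, so the
// split convolution sees the same boundary data as the original.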
absl::StatusOr<HloInstruction*> ConvolutionVisitor::HaloDuplicateWithSlice(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
HloInstruction* pad_val) {
const int64_t spatial_dim_count = spatial_dimensions_to_split.size();
const int64_t additional_batch_size =
IPow<int64_t>(ctrl_.number_of_splits, spatial_dim_count);
const int64_t original_batch_size =
activations->shape().dimensions(activations_batch_dim) /
additional_batch_size;
const int64_t spatial_split_size =
activations->shape().dimensions(spatial_dimensions_to_split[0]);
const int64_t batch_size = ctrl_.number_of_splits;
TF_ASSIGN_OR_RETURN(
activations, SplitAndTransposeMergedBatch(
activations, activations_batch_dim, original_batch_size,
spatial_dimensions_to_split));
const int64_t rank = activations->shape().rank();
VLOG(1) << "In HaloDuplicateWithSlice with activations "
<< activations->ToString() << " batch_size " << batch_size
<< " spatial_split_size " << spatial_split_size << " low_padding "
<< low_padding << " halo size " << halo_size;
CHECK_LE(std::abs(halo_size - low_padding), spatial_split_size);
for (int64_t i = 0; i < spatial_dimensions_to_split.size(); ++i) {
int64_t spatial_dimension_to_split = activations_batch_dim + 2 * (i + 1);
int64_t remapped_batch_dimension = spatial_dimension_to_split - 1;
HloInstruction* first_slice = nullptr;
std::vector<int64_t> strides(rank, 1);
HloInstruction* padding =
pad_val == nullptr
? activations->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(activations->shape().element_type())))
: pad_val;
if (low_padding > 0) {
std::vector<int64_t> start_indices(rank, 0),
end_indices(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
start_indices[spatial_dimension_to_split] =
spatial_split_size - low_padding;
end_indices[remapped_batch_dimension] = batch_size - 1;
end_indices[spatial_dimension_to_split] = spatial_split_size;
TF_ASSIGN_OR_RETURN(first_slice,
MakeSliceHlo(activations, start_indices, end_indices,
strides, &activations->metadata(),
&activations->frontend_attributes()));
VLOG(1) << "first slice " << first_slice->ToString();
PaddingConfig padding_config =
MakeNoPaddingConfig(first_slice->shape().dimensions_size());
padding_config.mutable_dimensions(remapped_batch_dimension)
->set_edge_padding_low(1);
TF_ASSIGN_OR_RETURN(first_slice,
MakePadHlo(first_slice, padding, padding_config,
&first_slice->metadata(),
&first_slice->frontend_attributes()));
}
HloInstruction* halo_region = nullptr;
if (halo_size - low_padding > 0) {
std::vector<int64_t> start_indices_halo(rank, 0),
end_indices_halo(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
start_indices_halo[remapped_batch_dimension] = 1;
end_indices_halo[spatial_dimension_to_split] = halo_size - low_padding;
TF_ASSIGN_OR_RETURN(
halo_region,
MakeSliceHlo(activations, start_indices_halo, end_indices_halo,
strides, &activations->metadata(),
&activations->frontend_attributes()));
VLOG(1) << "halo_region " << halo_region->ToString();
PaddingConfig padding_config_halo =
MakeNoPaddingConfig(halo_region->shape().dimensions_size());
padding_config_halo.mutable_dimensions(remapped_batch_dimension)
->set_edge_padding_high(1);
TF_ASSIGN_OR_RETURN(halo_region,
MakePadHlo(halo_region, padding, padding_config_halo,
&halo_region->metadata(),
&halo_region->frontend_attributes()));
}
if ((halo_size == 0 && low_padding != 0) || low_padding < 0) {
std::vector<int64_t> start_indices_activations_cut(rank, 0),
end_indices_activations_cut(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
if (low_padding > 0) {
end_indices_activations_cut[spatial_dimension_to_split] =
spatial_split_size - low_padding;
} else {
start_indices_activations_cut[spatial_dimension_to_split] =
0 - low_padding;
end_indices_activations_cut[spatial_dimension_to_split] =
spatial_split_size;
}
TF_ASSIGN_OR_RETURN(
activations, MakeSliceHlo(activations, start_indices_activations_cut,
end_indices_activations_cut, strides,
&activations->metadata(),
&activations->frontend_attributes()));
}
if (first_slice != nullptr) {
TF_ASSIGN_OR_RETURN(
activations,
MakeConcatHlo({first_slice, activations}, spatial_dimension_to_split,
&activations->metadata(),
&activations->frontend_attributes()));
}
if (halo_region != nullptr) {
TF_ASSIGN_OR_RETURN(
activations,
MakeConcatHlo({activations, halo_region}, spatial_dimension_to_split,
&activations->metadata(),
&activations->frontend_attributes()));
}
}
TF_ASSIGN_OR_RETURN(
activations,
TransposeAndMergeBatch(
activations,
spatial_dimensions_to_split,
activations_batch_dim, original_batch_size));
VLOG(1) << "HaloDuplicated activations " << activations->ToString();
return activations;
}
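// Transposes the activations so that the batch dimension sits immediately
// before the spatial dimensions being split, rewriting the convolution
// dimension numbers (input or kernel side, depending on is_backprop/is_rhs)
// to match the new layout.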
absl::StatusOr<ConvolutionVisitor::SpaceNextToBatchDetails>
ConvolutionVisitor::BringSpaceNextToBatch(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim,
std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop,
bool is_rhs) {
for (int64_t i = 1; i < spatial_dimensions_to_split->size(); ++i) {
CHECK_EQ(spatial_dimensions_to_split->at(i),
spatial_dimensions_to_split->at(i - 1) + 1)
<< "Spatial dimensions are not contiguous";
}
int64_t spatial_dimension_to_split = spatial_dimensions_to_split->at(0);
std::vector<int64_t> transpose_dims(activations->shape().rank());
if (spatial_dimension_to_split == activations_batch_dim + 1) {
absl::c_iota(transpose_dims, 0);
} else {
ConvolutionDimensionNumbers new_dim_numbers = dim_numbers;
int64_t pushed_counter = 0;
int64_t new_batch_dim, new_spatial_dim;
int64_t dim_counter = 0;
if (is_rhs) {
CHECK(is_backprop);
for (int i = 0; i < activations->shape().rank(); ++i) {
if (i == activations_batch_dim) {
continue;
}
if (i == spatial_dimension_to_split) {
transpose_dims[dim_counter++] = activations_batch_dim;
new_batch_dim = pushed_counter;
pushed_counter++;
new_spatial_dim = pushed_counter;
}
if (i == dim_numbers.kernel_output_feature_dimension()) {
new_dim_numbers.set_kernel_output_feature_dimension(pushed_counter);
} else {
auto it = absl::c_find(dim_numbers.kernel_spatial_dimensions(), i);
if (it != dim_numbers.kernel_spatial_dimensions().end()) {
int64_t j = it - dim_numbers.kernel_spatial_dimensions().begin();
new_dim_numbers.set_kernel_spatial_dimensions(j, pushed_counter);
}
}
transpose_dims[dim_counter++] = i;
pushed_counter++;
}
activations_batch_dim = new_batch_dim;
spatial_dimension_to_split = new_spatial_dim;
TF_ASSIGN_OR_RETURN(activations,
MakeTransposeHlo(activations, transpose_dims));
new_dim_numbers.set_kernel_input_feature_dimension(activations_batch_dim);
} else {
for (int i = 0; i < activations->shape().rank(); ++i) {
if (i == activations_batch_dim) {
continue;
}
if (i == spatial_dimension_to_split) {
transpose_dims[dim_counter++] = activations_batch_dim;
new_batch_dim = pushed_counter;
pushed_counter++;
new_spatial_dim = pushed_counter;
}
if (is_backprop && i == dim_numbers.input_batch_dimension()) {
new_dim_numbers.set_input_batch_dimension(pushed_counter);
} else if (i == dim_numbers.input_feature_dimension()) {
new_dim_numbers.set_input_feature_dimension(pushed_counter);
} else {
auto it = absl::c_find(dim_numbers.input_spatial_dimensions(), i);
if (it != dim_numbers.input_spatial_dimensions().end()) {
int64_t j = it - dim_numbers.input_spatial_dimensions().begin();
new_dim_numbers.set_input_spatial_dimensions(j, pushed_counter);
}
}
transpose_dims[dim_counter++] = i;
pushed_counter++;
}
activations_batch_dim = new_batch_dim;
spatial_dimension_to_split = new_spatial_dim;
TF_ASSIGN_OR_RETURN(activations,
MakeTransposeHlo(activations, transpose_dims));
if (is_backprop) {
new_dim_numbers.set_input_feature_dimension(activations_batch_dim);
} else {
new_dim_numbers.set_input_batch_dimension(activations_batch_dim);
}
}
dim_numbers = new_dim_numbers;
} | #include "xla/service/space_to_batch_converter.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
using SpaceToBatchConverterTest = HloTestBase;
namespace op = testing::opcode_matchers;
TEST_F(SpaceToBatchConverterTest, SimpleBatch1) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
ROOT %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 1);
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch1ConvXpose) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
ROOT tr = bf16[1,256,256,32] transpose(%convolution), dimensions={0,2,1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0), op::Select());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch1WithReduceWindow) {
std::string hlo_string = R"(
HloModule module
adder (lhs: bf16[], rhs: bf16[]) -> bf16[] {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
%constant = bf16[3] constant({1.0, 2.0, 3.0})
  %tuple = (bf16[1,256,256,32], bf16[3]) tuple(%convolution, %constant)
ROOT %gte = bf16[1,256,256,32] get-tuple-element(%tuple), index=0
  %gte2 = bf16[3] get-tuple-element(%tuple), index=1
%init = bf16[] constant(1.0)
%reduce-window = bf16[3] reduce-window(bf16[3] %gte2, bf16[] %init),
window={size=1}, to_apply=%adder
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
}
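// With limit_on_batch_size = 1, the batch-2 convolution is rejected and the
// pass must report no change.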
TEST_F(SpaceToBatchConverterTest, SimpleBatch2) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[2,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
ROOT %convolution = bf16[2,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 1});
ASSERT_FALSE(converter.Run(module.get()).value());
}
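// The convolution feeds a custom-call that space-to-batch cannot propagate
// through, so the converter should leave the module unchanged.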
TEST_F(SpaceToBatchConverterTest, UnpropagatableOp) {
std::string hlo_string = R"(
HloModule module
ENTRY comp {
%reduce-window = bf16[1,76,76,64]{3,2,1,0} parameter(0)
%convert.13 = bf16[3,3,64,64]{3,2,1,0} parameter(1)
%convolution.1 = bf16[64,76,76,1]{0,2,1,3} convolution(
%reduce-window, %convert.13), window={size=3x3 pad=1_1x1_1},
dim_labels=b01f_01io->f01b
ROOT custom-call.5079 = bf16[64,152,152,1]{0,2,1,3} custom-call(%convolution.1),
custom_call_target="ResizeNearest"
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 1});
ASSERT_FALSE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, Batch1WithStrideAndPad) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,224,224,3]{3,2,1,0} parameter(0)
%p1 = bf16[7,7,3,64]{3,2,1,0} parameter(1)
ROOT %convolution.3 = bf16[1,112,112,64]{3,2,1,0} convolution(%p0, %p1),
window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 4});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}
TEST_F(SpaceToBatchConverterTest, Batch1WithBaseDilation) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p2 = bf16[1,28,28,128]{3,0,2,1} parameter(0)
%p3 = bf16[1,1,512,128]{3,2,1,0} parameter(1)
ROOT %c = bf16[1,56,56,512]{3,0,2,1} convolution(%p2, %p3),
window={size=1x1 pad=0_1x0_1 lhs_dilate=2x2 rhs_reversal=1x1},
dim_labels=b01f_01oi->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}
TEST_F(SpaceToBatchConverterTest, PropagateThroughDot) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
%p2 = bf16[32,32] parameter(2)
ROOT %dot.5010 = bf16[1,256,256,32] dot(%convolution, %p2),
lhs_contracting_dims={3},
rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
}
}
} |
1,874 | cpp | tensorflow/tensorflow | hlo_cse | third_party/xla/xla/service/hlo_cse.cc | third_party/xla/xla/service/hlo_cse_test.cc | #ifndef XLA_SERVICE_HLO_CSE_H_
#define XLA_SERVICE_HLO_CSE_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
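// Common-subexpression elimination over HLO. is_layout_sensitive controls
// whether shapes must match including layout; the remaining flags optionally
// restrict CSE to fusion computations or scalar-shaped instructions and allow
// control dependencies to be ignored when removing duplicates.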
class HloCSE : public HloModulePass {
public:
explicit HloCSE(bool is_layout_sensitive,
bool only_fusion_computations = false,
bool ignore_control_dependencies = false,
bool only_scalars = false)
: is_layout_sensitive_(is_layout_sensitive),
only_fusion_computations_(only_fusion_computations),
ignore_control_dependencies_(ignore_control_dependencies),
only_scalars_(only_scalars) {}
~HloCSE() override = default;
absl::string_view name() const override { return "cse"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const bool is_layout_sensitive_;
const bool only_fusion_computations_;
const bool ignore_control_dependencies_;
const bool only_scalars_;
};
}
#endif
#include "xla/service/hlo_cse.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
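// Hash/equality key used to deduplicate constants: two constants compare
// equal when they belong to the same domain and have equal (optionally
// layout-sensitive) shapes and literals.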
template <bool kIsLayoutSensitive>
struct ConstantKey {
template <typename H>
friend H AbslHashValue(H h, const ConstantKey& key) {
h = H::combine(std::move(h), key.domain);
return Literal::Hash<H, kIsLayoutSensitive, 64>(
std::move(h), key.hlo->literal());
}
friend bool operator==(const ConstantKey& lhs, const ConstantKey& rhs) {
return lhs.domain == rhs.domain &&
(kIsLayoutSensitive ? Shape::Equal()
: Shape::Equal().IgnoreLayout())(
lhs.hlo->shape(), rhs.hlo->shape()) &&
lhs.hlo->literal().Equal(rhs.hlo->literal(), kIsLayoutSensitive);
}
HloConstantInstruction* hlo;
int64_t domain;
};
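// Replaces duplicate constants within a computation with a single
// representative. Domains are respected, so constants are only merged when
// they share a domain id; only_scalars restricts merging to scalar constants.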
template <bool kIsLayoutSensitive>
absl::StatusOr<bool> CombineConstants(HloComputation* computation,
bool only_scalars) {
std::unique_ptr<HloDomainMap> domain_map;
if (absl::c_any_of(computation->instructions(),
[&](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kDomain;
})) {
TF_ASSIGN_OR_RETURN(domain_map, HloDomainMap::Create(computation, ""));
}
absl::flat_hash_set<ConstantKey<kIsLayoutSensitive>> constants;
int64_t combined = 0;
auto inst_it = computation->instructions().begin();
while (inst_it != computation->instructions().end()) {
HloInstruction* instruction = *inst_it;
++inst_it;
if (only_scalars && !ShapeUtil::IsScalar(instruction->shape())) {
continue;
}
HloInstruction* match = nullptr;
if (auto* constant_inst = DynCast<HloConstantInstruction>(instruction)) {
auto insert_result = constants.insert(ConstantKey<kIsLayoutSensitive>{
constant_inst,
(domain_map != nullptr ? domain_map->GetDomainId(instruction) : 0)});
if (!insert_result.second) {
match = insert_result.first->hlo;
}
}
if (match != nullptr) {
TF_CHECK_OK(instruction->ReplaceAllUsesWith(match));
TF_CHECK_OK(computation->RemoveInstruction(instruction));
++combined;
}
}
VLOG(4) << "Combined " << combined << " constants and iotas in "
<< computation->name() << " computation";
return combined > 0;
}
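// Hash key for non-constant instructions. Operand ids of commutative binary
// ops are hashed order-independently, iota operands are normalized out of the
// hash (so structurally identical iotas can still match via eq_instructions),
// and opcode-specific fields such as slice bounds, padding config, dot and
// convolution dimension numbers, and windows are folded in.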
struct CseKey {
template <typename H>
friend H AbslHashValue(H h, const CseKey& key) {
auto instruction = key.hlo;
h = H::combine(std::move(h), instruction->opcode(),
instruction->shape().dimensions());
auto window_hash = [](H h, const Window& window) {
const auto& window_dims = window.dimensions();
for (const auto& window_dim : window_dims) {
h = H::combine(std::move(h), window_dim.size(), window_dim.stride(),
window_dim.padding_low(), window_dim.padding_high(),
window_dim.window_dilation(), window_dim.base_dilation(),
window_dim.window_reversal());
}
return H::combine(std::move(h), window_dims.size());
};
if (HloOpcodeIsBinaryCommutative(instruction->opcode())) {
CHECK_EQ(instruction->operand_count(), 2);
auto id0 = instruction->operand(0)->unique_id();
if (instruction->operand(0)->opcode() == HloOpcode::kIota) {
id0 = 0;
}
auto id1 = instruction->operand(1)->unique_id();
if (instruction->operand(1)->opcode() == HloOpcode::kIota) {
id1 = 0;
}
if (id0 > id1) {
std::swap(id0, id1);
}
h = H::combine(std::move(h), id0, id1);
} else {
for (auto operand : instruction->operands()) {
if (operand->opcode() == HloOpcode::kIota) {
continue;
}
h = H::combine(std::move(h), operand->unique_id());
}
}
for (auto c : instruction->called_computations()) {
h = H::combine(std::move(h), c->root_instruction()->opcode());
}
switch (instruction->opcode()) {
case HloOpcode::kSlice:
return H::combine(std::move(h), instruction->slice_starts(),
instruction->slice_strides());
case HloOpcode::kPad: {
const auto& padding_dims = instruction->padding_config().dimensions();
for (const auto& padding_dim : padding_dims) {
h = H::combine(std::move(h), padding_dim.edge_padding_low(),
padding_dim.edge_padding_high(),
padding_dim.interior_padding());
}
h = H::combine(std::move(h), padding_dims.size());
return std::move(h);
}
case HloOpcode::kDot: {
const auto& dot_dimension_numbers =
instruction->dot_dimension_numbers();
h = H::combine(
std::move(h),
absl::MakeSpan(dot_dimension_numbers.lhs_contracting_dimensions()),
absl::MakeSpan(dot_dimension_numbers.rhs_contracting_dimensions()),
absl::MakeSpan(dot_dimension_numbers.lhs_batch_dimensions()),
absl::MakeSpan(dot_dimension_numbers.rhs_batch_dimensions()));
return std::move(h);
}
case HloOpcode::kConvolution: {
const auto& conv_dimension_numbers =
instruction->convolution_dimension_numbers();
h = H::combine(
std::move(h), conv_dimension_numbers.input_batch_dimension(),
conv_dimension_numbers.input_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.input_spatial_dimensions()),
conv_dimension_numbers.kernel_input_feature_dimension(),
conv_dimension_numbers.kernel_output_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.kernel_spatial_dimensions()),
conv_dimension_numbers.output_batch_dimension(),
conv_dimension_numbers.output_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.output_spatial_dimensions()));
return window_hash(std::move(h), instruction->window());
}
case HloOpcode::kReduceWindow:
return window_hash(std::move(h), instruction->window());
case HloOpcode::kConcatenate:
case HloOpcode::kBroadcast:
case HloOpcode::kTranspose:
case HloOpcode::kReduce:
return H::combine(std::move(h), instruction->dimensions());
case HloOpcode::kGetTupleElement:
return H::combine(std::move(h), instruction->tuple_index());
case HloOpcode::kCompare:
return H::combine(
std::move(h),
Cast<HloCompareInstruction>(instruction)->direction());
default:
return std::move(h);
}
}
HloInstruction* hlo;
};
}
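// Combines duplicate constants first, then walks each computation in post
// order keeping one representative instruction per CseKey; later instructions
// that are identical (ignoring commutative operand order) are replaced by
// their representative, and equal iota operands within an instruction are
// unified as well.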
absl::StatusOr<bool> HloCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
const auto eq_instructions = [&](const HloInstruction* a,
const HloInstruction* b) {
if (a == b) {
return true;
}
if (a->opcode() != b->opcode() || a->opcode() != HloOpcode::kIota) {
return false;
}
return a->dimensions(0) == b->dimensions(0) &&
(is_layout_sensitive_
? ShapeUtil::Equal(a->shape(), b->shape())
: ShapeUtil::Compatible(a->shape(), b->shape()));
};
const auto eq_computations = [](const HloComputation* lhs,
const HloComputation* rhs) {
return *lhs == *rhs;
};
auto cse_equal = [&](const CseKey& lhs, const CseKey& rhs) {
return lhs.hlo->IdenticalIgnoringCommutativeOperandOrder(
*rhs.hlo, eq_instructions, eq_computations, is_layout_sensitive_,
true);
};
for (auto* computation : module->computations(execution_threads)) {
if (only_fusion_computations_ && !computation->IsFusionComputation()) {
continue;
}
TF_ASSIGN_OR_RETURN(
bool combined,
is_layout_sensitive_
? CombineConstants<true>(computation, only_scalars_)
: CombineConstants<false>(computation, only_scalars_));
changed |= combined;
absl::flat_hash_set<CseKey, absl::Hash<CseKey>, decltype(cse_equal)>
representatives(computation->instruction_count() + 1,
absl::Hash<CseKey>{}, cse_equal);
for (auto instruction : computation->MakeInstructionPostOrder()) {
if (instruction->operand_count() == 0 &&
instruction->opcode() != HloOpcode::kPartitionId &&
instruction->opcode() != HloOpcode::kReplicaId) {
continue;
}
if (instruction->HasSideEffect()) {
continue;
}
if (only_scalars_ && !ShapeUtil::IsScalar(instruction->shape())) {
continue;
}
auto pair = representatives.insert(CseKey{instruction});
if (!pair.second) {
HloInstruction* equivalent_instruction = pair.first->hlo;
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(equivalent_instruction));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(
instruction, std::nullopt,
ignore_control_dependencies_));
VLOG(4) << "Replaced " << instruction->name() << " with "
<< equivalent_instruction->name();
changed = true;
continue;
}
for (int64_t i = 0; i < instruction->operand_count(); ++i) {
HloInstruction* a = instruction->mutable_operand(i);
if (a->opcode() != HloOpcode::kIota) {
continue;
}
for (int64_t j = i + 1; j < instruction->operand_count(); ++j) {
HloInstruction* b = instruction->mutable_operand(j);
if (a == b || !eq_instructions(a, b)) {
continue;
}
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith(j, a));
changed = true;
if (b->IsDead()) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(b));
}
}
}
}
}
return changed;
}
} | #include "xla/service/hlo_cse.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
namespace m = xla::match;
class HloCseTest : public HloTestBase {
protected:
HloCseTest() {}
};
TEST_F(HloCseTest, CombineTwoConstants) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(2, computation->instruction_count());
HloInstruction* constant = *computation->instructions().begin();
EXPECT_EQ(42.0f, constant->literal().Get<float>({}));
auto result = ExecuteAndTransfer(module->Clone(), {});
auto expected = LiteralUtil::CreateR0<float>(84.0);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(1e-4)));
}
TEST_F(HloCseTest, CombineTwoConstantsDifferentLayouts) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_THAT(add, op::Add(constant1, constant2));
HloCSE cse(true);
EXPECT_FALSE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_THAT(add, op::Add(constant1, constant2));
auto result = ExecuteAndTransfer(module->Clone(), {});
auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(1e-4)));
}
TEST_F(HloCseTest, ConstantsSameValueDifferentType) {
auto builder = HloComputation::Builder(TestName());
std::vector<HloInstruction*> constants;
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint64_t>(42.0))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64_t>(42.0))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<double>(42.0))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
const Shape shape_r0 = ShapeUtil::MakeShape(F32, {});
for (int64_t i = 0; i < constants.size(); ++i) {
constants[i] = builder.AddInstruction(
HloInstruction::CreateConvert(shape_r0, constants[i]));
}
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
shape_r0, HloOpcode::kAdd, constants[0], constants[1]));
for (int64_t i = 2; i < constants.size(); ++i) {
root = builder.AddInstruction(HloInstruction::CreateBinary(
shape_r0, HloOpcode::kAdd, root, constants[i]));
}
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(20, computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(18, computation->instruction_count());
}
TEST_F(HloCseTest, NonscalarConstants) {
auto builder = HloComputation::Builder(TestName());
auto common_constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto common_constant2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto uncommon_constant =
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}})));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple(
{common_constant1, common_constant2, uncommon_constant}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple,
op::Tuple(common_constant1, common_constant2, uncommon_constant));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand,
::testing::AnyOf(common_constant1, common_constant2));
EXPECT_THAT(tuple,
op::Tuple(first_operand, first_operand, uncommon_constant));
}
TEST_F(HloCseTest, IdenticalInstructions) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto exp3 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2, exp3}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(5, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2, exp3));
HloCSE cse(true);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2, exp3));
EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand, first_operand));
}
TEST_F(HloCseTest, WhileLoopsIdenticalConditionsAndBodiesSameInput) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalConditionsAndBodiesSameInput
%body (param: (f32[], f32[])) -> (f32[], f32[]) {
%param = (f32[], f32[]) parameter(0)
%gte0 = get-tuple-element(%param), index=0
%gte1 = get-tuple-element(%param), index=1
%add = add(%gte0, %gte1)
ROOT %tuple = tuple(%gte0, %add)
}
%condition {
%param.1 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(false)
}
ENTRY %WhileLoopsIdenticalConditionsAndBodiesSameInput {
%c0 = f32[] constant(1)
%c1 = f32[] constant(2)
%t = tuple(c0, c1)
%while = while(%t), condition=%condition, body=%body
%while.1 = while(%t), condition=%condition.1, body=%body
ROOT r = tuple(while, while.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(6, computation->instruction_count());
HloCSE cse(true);
EXPECT_TRUE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, WhileLoopsIdenticalConditionsSameInputAndDifferentBodies) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalConditionsSameInputAndDifferentBodies
%body {
%param = (f32[], f32[]) parameter(0)
%get-tuple-element = get-tuple-element(%param), index=0
%get-tuple-element.1 = get-tuple-element(%param), index=1
%add = add(%get-tuple-element, %get-tuple-element.1)
ROOT %tuple = tuple(%get-tuple-element, %add)
}
%body2 {
%param.1 = (f32[], f32[]) parameter(0)
%get-tuple-element.2 = get-tuple-element(%param.1), index=0
%get-tuple-element.3 = get-tuple-element(%param.1), index=1
%sub = subtract(%get-tuple-element.2, %get-tuple-element.3)
ROOT %tuple.2 = tuple(%get-tuple-element.2, %sub)
}
%condition (param.2: (f32[], f32[])) -> pred[] {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 (param.3: (f32[], f32[])) -> pred[] {
%param.3 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(false)
}
ENTRY %WhileLoopsIdenticalConditionsSameInputAndDifferentBodies {
%constant.2 = f32[] constant(1)
%constant.3 = f32[] constant(2)
%tuple.1 = tuple(f32[] %constant.2, f32[] %constant.3)
%while = while(%tuple.1), condition=%condition, body=%body
ROOT %while.1 = while(%tuple.1), condition=%condition.1, body=%body2
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(5, computation->instruction_count());
HloCSE cse(true);
EXPECT_FALSE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, WhileLoopsIdenticalBodiesAndInputDifferentConditions) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalBodiesAndInputDifferentConditions
%body {
%param = (f32[], f32[]) parameter(0)
%get-tuple-element = get-tuple-element(%param), index=0
%get-tuple-element.1 = get-tuple-element((f32[], f32[]) %param), index=1
%add = add(%get-tuple-element, %get-tuple-element.1)
ROOT %tuple = tuple(%get-tuple-element, %add)
}
%condition {
%param.1 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(true)
}
ENTRY %WhileLoopsIdenticalBodiesAndInputDifferentConditions {
%constant.2 = f32[] constant(1)
%constant.3 = f32[] constant(2)
%tuple.1 = tuple(%constant.2, %constant.3)
%while = while(%tuple.1), condition=%condition, body=%body
ROOT %while.1 = while(%tuple.1), condition=%condition.1, body=%body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(5, computation->instruction_count());
HloCSE cse(true);
EXPECT_FALSE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp1->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp2->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(true);
EXPECT_FALSE(cse.Run(module.get()).value());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsInsensitive) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp1->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp2->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2));
EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand));
}
TEST_F(HloCseTest, FusionInternalCSE) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
const Shape shape_r0 = ShapeUtil::MakeShape(F32, {});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape_r0, "p0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape_r0, "p1"));
auto add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kAdd, param0, param1));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kAdd, param0, param1));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kMultiply, add1, add2));
auto computation = module->AddEntryComputation(builder.Build());
auto fused_computation =
computation
->CreateFusionInstruction({mul, add1, add2},
HloInstruction::FusionKind::kLoop)
->fused_instructions_computation();
EXPECT_EQ(5, fused_computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(4, fused_computation->instruction_count());
auto root = fused_computation->root_instruction();
EXPECT_THAT(root, op::Multiply(root->operand(0), root->operand(0)));
}
TEST_F(HloCseTest, IdenticalExpressions) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto negate1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, negate1, exp1));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, negate2, exp2));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add1, add2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(8, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(op::Add(negate1, exp1), op::Add(negate2, exp2)));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(5, computation->instruction_count());
auto operand = tuple->operand(0);
EXPECT_THAT(tuple, op::Tuple(operand, operand));
EXPECT_THAT(operand, op::Add(op::Negate(), op::Exp()));
}
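// Rng instructions have side effects, so two identical rngs must not be
// merged even though they are structurally equal.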
TEST_F(HloCseTest, DoNotCombineRng) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng1 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
auto rng2 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, rng1, rng2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Add(rng1, rng2));
uint32_t count_before = computation->instruction_count();
HloCSE cse(false);
EXPECT_FALSE(cse.Run(module.get()).value());
uint32_t count_after = computation->instruction_count();
EXPECT_EQ(count_before, count_after);
root = computation->root_instruction();
EXPECT_THAT(root, op::Add(rng1, rng2));
}
TEST_F(HloCseTest, DoNotCombineOpsWithDifferentShardings) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
constant.68 = s32[1]{0} constant({0})
custom-call.82 = s32[1]{0} custom-call(constant.68), custom_call_target="Sharding", sharding={replicated}
custom-call.1343 = s32[1]{0} custom-call(constant.68), custom_call_target="Sharding", sharding={manual}
custom-call.1344 = s32[8]{0} custom-call(custom-call.1343), custom_call_target="SPMDShardToFullShape", sharding={devices=[8]0,1,2,3,4,5,6,7}
ROOT tuple = (s32[1]{0}, s32[8]{0}) tuple(custom-call.82, custom-call.1344)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_FALSE(cse.Run(m.get()).value());
}
TEST_F(HloCseTest, DoNotCombineCallsToImpureFunctions) {
auto module = CreateNewVerifiedModule();
HloComputation* rng_function = nullptr;
{
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName() + "_rng_fun");
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng = builder.AddInstruction(HloInstruction::CreateRng(
scalar_shape, RandomDistribution::RNG_UNIFORM, {constant1, constant2}));
auto param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param"));
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, rng, param));
rng_function = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* computation = nullptr;
{
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({5.0f})));
auto rng1 = builder.AddInstruction(
HloInstruction::CreateMap(constant->shape(), {constant}, rng_function));
auto rng2 = builder.AddInstruction(
HloInstruction::CreateMap(constant->shape(), {constant}, rng_function));
builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, rng1, rng2));
computation = module->AddEntryComputation(builder.Build());
}
EXPECT_EQ(4, computation->instruction_count());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Add(op::Map(), op::Map()));
VLOG(3) << "before: " << module->ToString();
HloCSE cse(false);
EXPECT_FALSE(cse.Run(module.get()).value());
VLOG(3) << "after: " << module->ToString();
EXPECT_EQ(4, computation->instruction_count());
root = computation->root_instruction();
EXPECT_THAT(root, op::Add(op::Map(op::Constant()), op::Map(op::Constant())));
}
TEST_F(HloCseTest, CompareComputations) {
const char* const hlo_string = R"(
HloModule m
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = add(add_lhs, add_rhs)
}
add_computation2 {
add_lhs2 = f32[] parameter(0)
add_rhs2 = f32[] parameter(1)
ROOT add_root2 = add(add_lhs2, add_rhs2)
}
ENTRY entry {
p = f32[10]{0} parameter(0)
c = f32[] constant(0)
r1 = reduce(p, c), dimensions={0}, to_apply=add_computation
r2 = reduce(p, c), dimensions={0}, to_apply=add_computation2
ROOT f2 = tuple(r1, r2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1));
}
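// Domains with identical entry/exit metadata are merged; the domain targeting device 2 must remain distinct.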
TEST_F(HloCseTest, Domain) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = f32[] parameter(0), sharding={maximal device=0}
%domain.0 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
%domain.1 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
%domain.2 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=2}}
%negate.0 = f32[] negate(%domain.0)
%negate.1 = f32[] negate(%domain.1)
%negate.2 = f32[] negate(%domain.2)
%domain.3 = f32[] domain(%negate.0),
domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
%domain.4 = f32[] domain(%negate.1),
domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
%domain.5 = f32[] domain(%negate.2),
domain={kind="sharding", entry={maximal device=2}, exit={maximal device=0}}
%add = f32[] add(%domain.3, %domain.4)
ROOT %sub = f32[] subtract(%add, %domain.5)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(m.get()).value());
const HloInstruction* sub = m->entry_computation()->root_instruction();
const HloInstruction* add = sub->operand(0);
EXPECT_EQ(add->operand(0), add->operand(1));
EXPECT_NE(add->operand(0), sub->operand(1));
EXPECT_NE(add->operand(1), sub->operand(1));
}
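// Iotas are merged only when both the shape and the iota_dimension match.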
TEST_F(HloCseTest, Iota) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
i1 = s64[16,16] iota(), iota_dimension=0
i2 = s64[16,16] iota(), iota_dimension=0
i3 = s64[17,16] iota(), iota_dimension=0
i4 = s64[16,16] iota(), iota_dimension=1
ROOT root = (s64[16,16], s64[16,16], s64[17,16], s64[16,16]) tuple(i1, i2, i3, i4)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_TRUE(changed);
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1));
EXPECT_NE(root->operand(0), root->operand(2));
EXPECT_NE(root->operand(0), root->operand(3));
}
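// CSE must not fold the recomputed add back into the value routed through the opt-barrier, so the module is unchanged.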
TEST_F(HloCseTest, OptimizationBarrier) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%add.0 = f32[] add(%param.0, %param.1)
%cse_tmp.0 = (f32[], f32[], f32[]) tuple(%param.0, %param.1, %add.0)
%cse_tmp.1 = (f32[], f32[], f32[]) opt-barrier(%cse_tmp.0)
%param.0.1 = f32[] get-tuple-element(%cse_tmp.1), index=0
%param.1.1 = f32[] get-tuple-element(%cse_tmp.1), index=1
%add.0.1 = f32[] get-tuple-element(%cse_tmp.1), index=2
%add.1 = f32[] add(%param.0.1, %param.1.1)
ROOT %add.2 = f32[] add(%add.1, %add.0.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_FALSE(changed);
}
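// With scalar-only CSE enabled, the duplicate scalar constants are merged while the identical f32[2] constants are kept, leaving three constants.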
TEST_F(HloCseTest, OnlyScalar) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
%const1 = f32[] constant(1)
%const2 = f32[] constant(1)
%const3 = f32[2] constant({1,2})
%const4 = f32[2] constant({1,2})
%add.0 = f32[] add(%const1, %const2)
%add.1 = f32[2] add(%const3, %const4)
ROOT out = (f32[], f32[2]) tuple(%add.0, %add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false, false, false, true);  // The final argument restricts CSE to scalar-shaped instructions.
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(absl::c_count_if(m->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return instruction->IsConstant();
}),
3);
}
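// Parameterized test: each case provides two custom-call strings plus whether CSE is expected to merge the differing one with the duplicated one.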
class HloCseCustomCallTest
: public HloCseTest,
public ::testing::WithParamInterface<std::tuple<
std::string /*op1*/, std::string /*op2*/, bool /*should_cse*/>> {};
TEST_P(HloCseCustomCallTest, DoIt) {
std::string op1 = std::get<0>(GetParam());
std::string op2 = std::get<1>(GetParam());
bool should_cse = std::get<2>(GetParam());
const char* const hlo_string_tmpl = R"(
HloModule m
ENTRY entry {
p0 = f32[1,1,1] parameter(0)
op0 = $0
op1 = $0
op2 = $1
ROOT root = tuple(op0, op1, op2)
}
)";
std::string hlo_string = absl::Substitute(hlo_string_tmpl, op1, op2);
SCOPED_TRACE(absl::StrCat("Module before CSE:\n", hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, true);
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1))
<< "Identical ops should be CSE'ed";
if (should_cse) {
EXPECT_EQ(root->operand(0), root->operand(2)) << "Ops should be CSE'ed";
} else {
EXPECT_NE(root->operand(0), root->operand(2)) << "Ops should not be CSE'ed";
}
}
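// Builds the (op1, op2, should_cse) cases; the build() helper creates custom-call pairs that differ only in one attribute and are expected not to be merged.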
static std::vector<
std::tuple<std::string /*op1*/, std::string /*op2*/, bool /*should_cse*/>>
CustomCallTests() {
auto build = [](absl::string_view args1, absl::string_view args2) {
absl::string_view prefix =
"f32[] custom-call(p0), custom_call_target=\"foo\", ";
return std::make_tuple(absl::StrCat(prefix, args1),
absl::StrCat(prefix, args2), false);
};
return {
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0), custom_call_target=\"foo\", "
"metadata={op_name=\"bar\"}",
true,
},
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0, p0), custom_call_target=\"foo\"",
false,
},
{
"f32[1] custom-call(p0), custom_call_target=\"foo\"",
"f32[2] custom-call(p0), custom_call_target=\"foo\"",
false,
},
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0), custom_call_target=\"bar\"",
false,
},
build("window={size=1}", "window={size=2}"),
build("dim_labels=b0f_0oi->b0f", "dim_labels=b0f_0oi->bf0"),
build("backend_config=\"foo\"", "backend_config=\"bar\""),
build("literal=s32[] 0", "literal=s32[] 1"),
build("literal=s32[] 0", "literal=f32[] 0"),
build("operand_precision={high,default}",
"operand_precision={high, high}"),
build("api_version=API_VERSION_STATUS_RETURNING",
"api_version=API_VERSION_ORIGINAL"),
build("feature_group_count=0", "feature_group_count=1"),
};
}
INSTANTIATE_TEST_SUITE_P(HloCseCustomCallTestSuite, HloCseCustomCallTest,
::testing::ValuesIn(CustomCallTests()));
TEST_F(HloCseTest, CustomCallCalledComputations) {
const char* const hlo_string = R"(
HloModule m
comp {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT maximum = f32[] maximum(lhs, rhs)
}
ENTRY entry {
p0 = f32[] parameter(0)
op0 = f32[] |
1,875 | cpp | tensorflow/tensorflow | latency_hiding_scheduler | third_party/xla/xla/service/latency_hiding_scheduler.cc | third_party/xla/xla/service/latency_hiding_scheduler_test.cc | #ifndef XLA_SERVICE_LATENCY_HIDING_SCHEDULER_H_
#define XLA_SERVICE_LATENCY_HIDING_SCHEDULER_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/xla.pb.h"
namespace xla {
struct CanonicalAsyncOp {
HloOpcode outer;
HloOpcode inner;
};
CanonicalAsyncOp DefaultGetCanonicalAsyncOp(const HloInstruction& hlo);
using GetCanonicalAsyncOpFunc =
std::function<CanonicalAsyncOp(const HloInstruction& hlo)>;
class HloGraphNode;
class ModulePressureState;
enum class ResourceType {
kNoResource = 0,
kAllToAll = 1,
kAllGather = 2,
kAllReduce = 3,
kCollectivePermute = 4,
kCopy = 5,
kReduceScatter = 6,
kSendRecv = 7,
kSendHost = 8,
kRecvHost = 9,
kCollectiveBroadcast = 10,
kNumResources = 11,
kTargetDefinedResourcesBound = 10000,
};
enum class ResourceUsageType {
kNoResource,
kResourceOccupy,
kResourceRelease,
};
enum class ResourceHazardType {
kShareable = 0,
kSerial = 1,
kNonextendable = 2,
kUnshareable = 3,
};
constexpr int64_t ResourceTypeToIndex(ResourceType resource_type) {
return static_cast<int64_t>(resource_type);
}
constexpr int64_t ResourceUsageTypeToIndex(
ResourceUsageType resource_usage_type) {
return static_cast<int64_t>(resource_usage_type);
}
using ResourcePair = std::pair<int64_t, ResourceUsageType>;
using ResourcesVector = absl::InlinedVector<ResourcePair, 1>;
class HloGraphNode;
class HloScheduleGraph;
struct SchedulerConfig {
int64_t collective_broadcast_overlap_limit = 1;
int64_t collective_permute_overlap_limit = 1;
int64_t all_to_all_overlap_limit = 1;
int64_t all_gather_overlap_limit = 1;
int64_t all_reduce_overlap_limit = 1;
int64_t reduce_scatter_overlap_limit = 1;
int64_t send_recv_overlap_limit = 1;
int64_t send_recv_host_overlap_limit = 1;
int64_t copy_overlap_limit = 1;
uint64_t memory_limit = UINT64_MAX;
bool schedule_send_recvs = false;
bool force_send_recv_to_use_same_resource = false;
bool use_real_cost_model = false;
bool aggressive_scheduling_policies = false;
bool enable_release_start_policy = false;
bool resource_sharing = false;
bool resource_serializing = false;
bool depth_based_memory_pressure_reduction = false;
int64_t rerun = 0;
};
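// Interface providing latency estimates between schedule-graph nodes and per-instruction cost estimates.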
class LatencyEstimator {
public:
using TimeCost = double;
virtual TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& target) const = 0;
virtual TimeCost NodeCost(const HloInstruction* node) const = 0;
virtual int CyclesPerMicrosecond() const = 0;
virtual ~LatencyEstimator() = default;
inline CanonicalAsyncOp GetCanonicalAsyncOp(const HloInstruction& hlo) const {
return get_canonical_async_op_(hlo);
}
bool IsAsyncPair(const HloGraphNode& from, const HloGraphNode& target) const;
bool IsP2pPair(const HloGraphNode& from, const HloGraphNode& target) const;
explicit LatencyEstimator(
GetCanonicalAsyncOpFunc func = DefaultGetCanonicalAsyncOp)
: get_canonical_async_op_(func) {}
private:
GetCanonicalAsyncOpFunc get_canonical_async_op_;
};
class ApproximateLatencyEstimator : public LatencyEstimator {
public:
explicit ApproximateLatencyEstimator(
GetCanonicalAsyncOpFunc func = DefaultGetCanonicalAsyncOp)
: LatencyEstimator(func) {}
TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& target) const override;
TimeCost NodeCost(const HloInstruction* instr) const override;
int CyclesPerMicrosecond() const override { return 1; }
public:
static constexpr TimeCost kLowCost = 1.0;
static constexpr TimeCost kMediumCost = 1000.0;
static constexpr TimeCost kHighCost = 5000.0;
protected:
static constexpr TimeCost kLowLatency = 1.0;
static constexpr TimeCost kHighLatency = 5000.0;
};
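// Tracks asynchronous operations and the resources they occupy or release, including target-defined resource types.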
class AsyncTracker {
public:
virtual ~AsyncTracker() = default;
virtual bool IsSupportedAsyncDone(const HloInstruction& hlo) const;
virtual bool IsSupportedAsyncStart(const HloInstruction& hlo) const;
virtual ResourcesVector GetResourcesFromInstructionImpl(
const HloInstruction& hlo) const;
virtual ResourcesVector GetResourcesFromInstruction(
const HloInstruction& hlo) const;
virtual void PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const {}
virtual int64_t GetNumResourcesPerInstruction(
ResourceType resource_type, const HloInstruction& instr) const;
virtual int64_t GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const;
virtual void SetConcurrentResourceLimits(
absl::flat_hash_map<int64_t, int64_t>& max_concurrent_resource) const;
virtual absl::string_view GetResourceName(int64_t resource_type) const;
absl::string_view GetResourceUsageName(int64_t resource_usage_type) const;
absl::string_view GetResourceUsageName(
ResourceUsageType resource_usage_type) const;
static int64_t GetFirstTargetDefinedResource() {
return static_cast<int64_t>(ResourceType::kTargetDefinedResourcesBound) + 1;
}
virtual int64_t GetNumTargetDefinedResources() const;
virtual int64_t GetNumAvailableResources(int64_t resource_type) const;
virtual ResourceHazardType GetResourceHazardType(int64_t resource_type) const;
virtual absl::InlinedVector<int64_t, 1>
GetReleasedShareableResourcesFromVector(
const ResourcesVector& resources) const;
virtual absl::InlinedVector<int64_t, 1>
GetOccupiedShareableResourcesFromVector(
const ResourcesVector& resources) const;
virtual absl::InlinedVector<int64_t, 1> GetOccupiedSerialResourcesFromVector(
const ResourcesVector& resources) const;
virtual absl::InlinedVector<int64_t, 1>
GetReleasedNonextendableResourcesFromVector(
const ResourcesVector& resources) const;
inline CanonicalAsyncOp GetCanonicalAsyncOp(const HloInstruction& hlo) const {
return get_canonical_async_op_(hlo);
}
explicit AsyncTracker(
const SchedulerConfig& config,
GetCanonicalAsyncOpFunc func = DefaultGetCanonicalAsyncOp)
: config_(config), get_canonical_async_op_(func) {}
private:
const SchedulerConfig config_;
mutable absl::flat_hash_map<const HloComputation*,
absl::flat_hash_map<int64_t, int64_t>>
async_in_computation_cache_;
GetCanonicalAsyncOpFunc get_canonical_async_op_;
protected:
mutable absl::flat_hash_map<const HloInstruction*, ResourcesVector>
resources_cache_;
};
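// Core scheduling interface: initialized once per module, then asked to produce a new instruction order per computation.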
class SchedulerCore {
public:
virtual absl::Status InitializeScheduler(const HloModule* module) = 0;
virtual absl::StatusOr<std::vector<HloInstruction*>> ScheduleComputation(
const HloComputation* computation) = 0;
virtual ~SchedulerCore() = default;
virtual int64_t GetMemoryPeak() = 0;
virtual void SetMemoryLimit(uint64_t new_limit) = 0;
virtual uint64_t GetMemoryLimit() = 0;
virtual int64_t GetRerunTimes() = 0;
};
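// Edge of the schedule graph, carrying the (possibly adjusted) latency toward its target node.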
class HloEdge {
public:
HloEdge(LatencyEstimator::TimeCost latency, HloGraphNode* target)
: latency_(latency), original_latency_(latency), target_(target) {}
LatencyEstimator::TimeCost Latency() const { return latency_; }
LatencyEstimator::TimeCost OriginalLatency() const {
return original_latency_;
}
void SetLatency(LatencyEstimator::TimeCost latency) { latency_ = latency; }
void SetOriginalLatency(LatencyEstimator::TimeCost original_latency) {
original_latency_ = original_latency;
}
const HloGraphNode& Target() const { return *target_; }
HloGraphNode& Target() { return *target_; }
std::string ToString() const;
private:
LatencyEstimator::TimeCost latency_;
LatencyEstimator::TimeCost original_latency_;
HloGraphNode* target_;
};
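// Schedule-graph node wrapping an HloInstruction together with its scheduling state: degrees, cost, depths, and resource usage.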
class HloGraphNode {
public:
using TimeCost = LatencyEstimator::TimeCost;
explicit HloGraphNode(const HloInstruction* i, int64_t original_position)
: instr_(i), original_position_(original_position) {}
const HloInstruction& GetInstr() const { return *instr_; }
bool IsScheduled() const { return scheduled_; }
int32_t GetIndegree() const { return indegree_; }
int32_t GetOutdegree() const { return outdegree_; }
TimeCost GetReadyTime() const { return ready_time_; }
void SetIndegree(int64_t indeg) { indegree_ = indeg; }
void SetOutdegree(int64_t outdeg) { outdegree_ = outdeg; }
void SetScheduled() { scheduled_ = true; }
void SetReadyTime(TimeCost ready_time) { ready_time_ = ready_time; }
TimeCost GetCost() const { return cost_; }
void SetCost(TimeCost cost) { cost_ = cost; }
TimeCost GetAsyncDepth() const { return async_depth_; }
TimeCost GetDepth() const { return depth_; }
TimeCost GetGraphDepth() const { return graph_depth_; }
void SetAsyncDepth(TimeCost async_depth) { async_depth_ = async_depth; }
void SetDepth(TimeCost depth) { depth_ = depth; }
void SetGraphDepth(TimeCost graph_depth) { graph_depth_ = graph_depth; }
bool GetForceDelay() const { return force_delay_; }
void SetForceDelay(bool force_delay) { force_delay_ = force_delay; }
bool GetForceEarly() const { return force_early_; }
void SetForceEarly(bool force_early) { force_early_ = force_early; }
ResourcesVector GetResources() const { return resources_; }
bool DoesOccupyAnyResource() const {
return absl::c_any_of(resources_, [](const ResourcePair& resource) {
return resource.second == ResourceUsageType::kResourceOccupy;
});
}
bool DoesReleaseAnyResource() const {
return absl::c_any_of(resources_, [](const ResourcePair& resource) {
return resource.second == ResourceUsageType::kResourceRelease;
});
}
bool DoesOccupyShareableResource(int64_t resource) const {
return absl::c_linear_search(occupied_shareable_resources_, resource);
}
bool DoesReleaseResource(ResourceType res) const {
return absl::c_any_of(resources_, [res](const ResourcePair& resource) {
return resource.second == ResourceUsageType::kResourceRelease &&
resource.first == ResourceTypeToIndex(res);
});
}
std::optional<ResourceUsageType> UsesResourceType(ResourceType res) const {
int64_t res_type = ResourceTypeToIndex(res);
for (const auto& [resource_type, usage_type] : resources_) {
if (resource_type == res_type) {
return usage_type;
}
}
return std::nullopt;
}
std::optional<ResourceUsageType> UsesResourceType(int64_t res) const {
for (const auto& [resource_type, usage_type] : resources_) {
if (resource_type == res) {
return usage_type;
}
}
return std::nullopt;
}
std::vector<int64_t> GetShareableResourcesOnEdge(const HloEdge& edge) const {
HloGraphNode node = edge.Target();
std::vector<int64_t> resources;
absl::c_for_each(released_shareable_resources_,
[&node, &resources](const int64_t resource) {
if (node.DoesOccupyShareableResource(resource)) {
resources.push_back(resource);
}
});
return resources;
}
absl::Span<HloEdge> GetPredecessors() {
return absl::MakeSpan(predecessors_);
}
absl::Span<const HloEdge> GetPredecessors() const {
return absl::MakeConstSpan(predecessors_);
}
void AddPredecessor(const HloEdge& e) { predecessors_.push_back(e); }
absl::Span<HloEdge> GetSuccessors() { return absl::MakeSpan(successors_); }
absl::Span<const HloEdge> GetSuccessors() const {
return absl::MakeConstSpan(successors_);
}
void AddSuccessor(const HloEdge& e) { successors_.push_back(e); }
int64_t GetOriginalPosition() const { return original_position_; }
std::string ToString(const AsyncTracker* async_tracker = nullptr) const {
std::string result;
absl::StrAppend(&result, "Instr: ", instr_->ToShortString(), "\n");
absl::StrAppend(&result, "ReadyTime: ", ready_time_, "\n");
absl::StrAppend(&result, "Indegree: ", indegree_, "\n");
absl::StrAppend(&result, "Outdegree: ", outdegree_, "\n");
absl::StrAppend(&result, "Cost: ", cost_, "\n");
absl::StrAppend(&result, "Async Depth: ", async_depth_, "\n");
absl::StrAppend(&result, "Depth: ", depth_, "\n");
absl::StrAppend(&result, "Graph Depth: ", graph_depth_, "\n");
absl::StrAppend(&result, "Force Delay: ", force_delay_, "\n");
absl::StrAppend(&result, "Force Early: ", force_early_, "\n");
absl::StrAppend(&result, "Predecessors:\n");
for (const HloEdge& e : predecessors_) {
absl::StrAppend(&result, e.ToString());
}
absl::StrAppend(&result, "Successors:\n");
for (const HloEdge& e : successors_) {
absl::StrAppend(&result, e.ToString());
}
if (async_tracker != nullptr) {
absl::StrAppend(&result, "Resources:\n");
for (const auto& [resource, usage] : resources_) {
absl::StrAppend(
&result, "\tResource: ", async_tracker->GetResourceName(resource),
" usage: ", async_tracker->GetResourceUsageName(usage), "\n");
}
}
return result;
}
private:
friend class HloScheduleGraph;
std::vector<HloEdge> predecessors_;
std::vector<HloEdge> successors_;
const HloInstruction* instr_;
int64_t original_position_;
TimeCost ready_time_ = std::numeric_limits<TimeCost>::max();
int32_t indegree_ = 0;
int32_t outdegree_ = 0;
TimeCost cost_ = 0.0;
TimeCost async_depth_ = 0.0;
TimeCost depth_ = 0.0;
int64_t graph_depth_ = 0;
ResourcesVector resources_;
bool force_delay_ = false;
bool force_early_ = false;
bool scheduled_ = false;
absl::InlinedVector<int64_t, 1> released_shareable_resources_;
absl::InlinedVector<int64_t, 1> occupied_shareable_resources_;
};
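// Dependency graph over a computation's instructions, constructed from the post-order sequence, alias analysis, and the latency estimator.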
class HloScheduleGraph {
public:
HloScheduleGraph(const std::vector<HloInstruction*>* post_order_instructions,
HloAliasAnalysis* alias_analysis,
const LatencyEstimator* latency_estimator,
const AsyncTracker* async_tracker);
std::string ToString(const AsyncTracker* async_tracker = nullptr) const;
HloGraphNode& GetNode(const HloInstruction* instr) const;
std::vector<HloGraphNode*> FindBottomRoots() const;
std::vector<HloGraphNode*> FindTopRoots() const;
void InitializeGraphAnalysis(const AsyncTracker* async_tracker);
absl::Span<const HloInstruction* const> GetOriginalInstrList() const {
return absl::MakeConstSpan(original_order_);
}
int64_t OriginalInstructionPosition(const HloInstruction* instr) const {
auto it = instr_order_map_.find(instr);
CHECK(it != instr_order_map_.end());
return it->second;
}
private:
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloGraphNode>>
nodes_;
absl::flat_hash_map<const HloInstruction*, int64_t> instr_order_map_;
std::vector<const HloInstruction*> original_order_;
bool IsPredecessorTransitively(const HloGraphNode* node,
const HloGraphNode* possible_predecessor);
};
class BufferInfoTracker {
public:
struct ValueInfo {
const HloBuffer* value = nullptr;
const HloInstruction* first_definition = nullptr;
int64_t buffer_size = 0;
};
BufferInfoTracker(const HloModule* module,
const HloAliasAnalysis* alias_analysis,
const HloCostAnalysis::ShapeSizeFunction& shape_size_bytes);
static ValueInfo CreateBufferInfo(
const HloBuffer* value, const HloInstruction* first_definition,
const HloCostAnalysis::ShapeSizeFunction& shape_size_bytes) {
return ValueInfo{
value, first_definition,
shape_size_bytes(value->values()[0]->shape())};
}
const ValueInfo& GetBufferInfo(HloBuffer::Id id) const {
return buffer_infos_[id];
}
private:
std::vector<ValueInfo> buffer_infos_;
};
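// Tracks live buffers and memory usage as instructions are scheduled, so candidates can be compared by their effect on memory pressure.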
class MemoryPressureTracker {
public:
using LiveBufferSet = absl::flat_hash_set<HloBuffer::Id>;
struct MemoryPressureState {
int64_t memory_peak = 0;
absl::flat_hash_set<HloBuffer::Id> live_ids_at_bottom;
};
MemoryPressureTracker(
const HloAliasAnalysis* hlo_alias_analysis,
const BufferInfoTracker& buffer_tracker,
const absl::flat_hash_map<const HloComputation*, MemoryPressureState>&
pressure_state_cache)
: hlo_alias_analysis_(hlo_alias_analysis),
live_buffers_(hlo_alias_analysis->buffers().back().id() + 1),
buffer_tracker_(buffer_tracker),
pressure_state_cache_(pressure_state_cache),
live_memory_usage_(0),
initial_memory_pressure_(0) {}
void Initialize(const HloComputation* computation,
const LiveBufferSet& initial_live_buffers);
void UpdateBuffers(const HloInstruction* instruction);
std::pair<int64_t, int64_t> MemoryPressureDifference(
const HloInstruction* instruction) const;
absl::flat_hash_set<HloBuffer::Id> live_buffers() const {
return live_buffers_set_;
}
bool BufferIsLive(const HloValue* buffer) const {
CHECK_LT(buffer->id(), live_buffers_.size());
return live_buffers_[buffer->id()];
}
int64_t memory_usage() const {
return live_memory_usage_ + initial_memory_pressure_;
}
int64_t initial_memory_pressure() const { return initial_memory_pressure_; }
const MemoryPressureState& pressure_state() const { return pressure_state_; }
private:
static bool ShouldSkipBufferAllocations(
const HloInstruction* instruction, const ShapeIndex& idx,
const HloInstruction* first_definition) {
if ((instruction->opcode() == HloOpcode::kGetTupleElement ||
instruction->opcode() == HloOpcode::kBitcast) &&
!idx.empty()) {
return true;
}
if (first_definition->opcode() == HloOpcode::kParameter &&
first_definition->parent()->IsEntryComputation()) {
return true;
}
return false;
}
static bool ShouldSkipBufferReleases(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kParameter) {
return true;
}
return false;
}
const HloAliasAnalysis* hlo_alias_analysis_;
std::vector<int8_t> live_buffers_;
LiveBufferSet live_buffers_set_;
const BufferInfoTracker& buffer_tracker_;
absl::flat_hash_map<
HloInstruction*,
std::vector<std::pair<BufferInfoTracker::ValueInfo, ShapeIndex>>>
output_buffers_;
absl::flat_hash_map<HloInstruction*,
std::vector<BufferInfoTracker::ValueInfo>>
defined_buffers_;
const absl::flat_hash_map<const HloComputation*, MemoryPressureState>&
pressure_state_cache_;
int64_t live_memory_usage_;
int64_t initial_memory_pressure_;
MemoryPressureState pressure_state_;
};
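// Caches per-computation memory pressure state across the module and records the overall memory peak.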
class ModulePressureState {
public:
using PressureStateMap =
absl::flat_hash_map<const HloComputation*,
MemoryPressureTracker::MemoryPressureState>;
ModulePressureState(
const HloModule* module, const HloAliasAnalysis* hlo_alias_analysis,
const HloCostAnalysis::ShapeSizeFunction& shape_size_bytes)
: module_(module),
hlo_alias_analysis_(hlo_alias_analysis),
buffer_tracker_(module, hlo_alias_analysis, shape_size_bytes) {}
void InitializePressureStates();
bool ComputationIsMemoryTracked(const HloComputation* computation) const {
return ContainsKey(memory_pressure_states_, computation);
}
const MemoryPressureTracker::MemoryPressureState&
GetPressureStateForComputation(const HloComputation* comp) const {
auto it = memory_pressure_states_.find(comp);
CHECK(it != memory_pressure_states_.end())
<< "No state for " << comp->name();
return it->second;
}
void UpdatePressureStateForComputation(
const HloComputation* comp,
MemoryPressureTracker::MemoryPressureState state) {
memory_pressure_states_[comp] = state;
memory_peak_ = std::max(memory_peak_, state.memory_peak);
}
const PressureStateMap& pressure_state_cache() const {
return memory_pressure_states_;
}
const BufferInfoTracker& buffer_tracker() const { return buffer_tracker_; }
int64_t GetMemoryPeak() { return memory_peak_; }
void SetMemoryPeak(int64_t peak) { memory_peak_ = peak; }
private:
const HloModule* module_;
const HloAliasAnalysis* hlo_alias_analysis_;
absl::flat_hash_map<const HloComputation*,
MemoryPressureTracker::MemoryPressureState>
memory_pressure_states_;
BufferInfoTracker buffer_tracker_;
int64_t memory_peak_ = 0;
};
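// Default scheduler implementation: maintains a ready set of graph nodes and selects candidates using the rules and resource limits below.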
class DefaultSchedulerCore : public SchedulerCore {
public:
using ReadyQueueSet = std::vector<HloGraphNode*>;
using ResourceMap = absl::flat_hash_map<int64_t, int64_t>;
using ShouldSkipNodeFunction = std::function<bool(const HloGraphNode*)>;
struct ScheduleCandidate {
HloGraphNode* node = nullptr;
std::optional<std::pair<int64_t, int64_t>> pressure_change;
std::optional<HloGraphNode::TimeCost> estimated_connected_send_ready_time;
std::optional<bool> resource_constrained;
};
struct CandidateResult {
ScheduleCandidate result;
const char* reason;
};
using TargetSchedulingRule = std::function<std::optional<CandidateResult>(
ScheduleCandidate&, ScheduleCandidate&)>;
static std::optional<bool> TrueForOneOnly(bool first, bool second) {
if (first == second) {
return std::nullopt;
}
return first;
}
static std::optional<CandidateResult> ChooseBestCandidate(
bool first_cond, const ScheduleCandidate& first_candidate,
bool second_cond, const ScheduleCandidate& second_candidate,
const char* reason) {
if (auto cond = TrueForOneOnly(first_cond, second_cond)) {
return CandidateResult{*cond ? first_candidate : second_candidate,
reason};
}
return std::nullopt;
}
struct SchedulingState {
HloScheduleGraph sched_graph;
ReadyQueueSet ready_set;
ResourceMap max_concurrent_resource;
std::vector<HloInstruction*> new_sequence_reversed;
HloGraphNode::TimeCost current_ti | #include "xla/service/latency_hiding_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/async_collective_creator.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
constexpr int kMaxConcurrentAsyncCollectivePermutes = 5;
int PositionInVector(absl::Span<HloInstruction* const> vec,
const HloInstruction* element) {
return std::distance(vec.begin(), std::find(vec.begin(), vec.end(), element));
}
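// Returns true if at most kMaxConcurrentAsyncCollectivePermutes collective-permutes are in flight at any point of the sequence.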
bool MaxConcurrentCollectivePermutesBelowThreshold(
absl::Span<HloInstruction* const> instruction_sequence) {
int max_concurrent_collective_permutes = 0;
int num_concurrent_collective_permutes = 0;
for (HloInstruction* instruction : instruction_sequence) {
if (instruction->opcode() == HloOpcode::kCollectivePermuteStart) {
num_concurrent_collective_permutes += 1;
max_concurrent_collective_permutes =
std::max(max_concurrent_collective_permutes,
num_concurrent_collective_permutes);
}
if (instruction->opcode() == HloOpcode::kCollectivePermuteDone) {
num_concurrent_collective_permutes -= 1;
}
}
int max_num_collective_permutes_threshold =
kMaxConcurrentAsyncCollectivePermutes;
return max_concurrent_collective_permutes <=
max_num_collective_permutes_threshold;
}
int GetIndex(absl::Span<HloInstruction* const> instruction_sequence,
absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
}
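// Returns the position of the instruction with the given opcode and metadata op_name.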
int GetOpcodeIndexUsingMetaData(
HloOpcode opcode, absl::Span<HloInstruction* const> instruction_sequence,
absl::string_view metadata_name) {
return absl::c_find_if(instruction_sequence,
[metadata_name, opcode](HloInstruction* instruction) {
return instruction->metadata().op_name() ==
metadata_name &&
instruction->opcode() == opcode;
}) -
instruction_sequence.begin();
}
SchedulerConfig GetDefaultSchedConfig() {
SchedulerConfig sched_cfg;
sched_cfg.collective_permute_overlap_limit =
kMaxConcurrentAsyncCollectivePermutes;
sched_cfg.send_recv_overlap_limit = INT32_MAX;
return sched_cfg;
}
class TestLatencyEstimator : public LatencyEstimator {
public:
TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& target) const override {
static constexpr TimeCost kLowLatency = 1.0;
if (from.GetInstr().opcode() == HloOpcode::kCollectivePermuteStart &&
target.GetInstr().opcode() == HloOpcode::kCollectivePermuteDone) {
return kLowLatency *
ShapeUtil::ElementsIn(from.GetInstr().operand(0)->shape());
}
return kLowLatency;
}
TimeCost NodeCost(const HloInstruction* instr) const override {
if (instr->IsLoopFusion()) {
return instr->shape().IsTuple()
? kMediumCost
: kLowCost * ShapeUtil::ElementsIn(instr->shape());
}
if (instr->IsOutputFusion() || instr->opcode() == HloOpcode::kConvolution) {
return instr->shape().IsTuple()
? kHighCost
: kMediumCost * ShapeUtil::ElementsIn(instr->shape());
}
return kLowCost;
}
int CyclesPerMicrosecond() const override { return 1; }
public:
static constexpr TimeCost kLowCost = 1.0;
static constexpr TimeCost kMediumCost = 1000.0;
static constexpr TimeCost kHighCost = 5000.0;
};
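// Converts synchronous collectives into async start/done pairs, then runs the latency-hiding scheduler with the given config and estimator.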
absl::StatusOr<bool> RunScheduler(
HloModule* module, SchedulerConfig sched_config = GetDefaultSchedConfig(),
std::unique_ptr<LatencyEstimator> latency_estimator =
std::make_unique<ApproximateLatencyEstimator>()) {
AsyncCollectiveCreator::CollectiveCreatorConfig config{
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue};
TF_ASSIGN_OR_RETURN(bool value,
AsyncCollectiveCreator(std::move(config)).Run(module));
HloCostAnalysis::ShapeSizeFunction shape_size_bytes =
[&shape_size_bytes](const Shape& shape) -> int64_t {
int64_t shape_size = 0;
if (shape.IsTuple()) {
for (auto& sub_shape : shape.tuple_shapes()) {
shape_size += shape_size_bytes(sub_shape);
}
return shape_size;
}
return ShapeUtil::ByteSizeOfElements(shape);
};
auto async_tracker = std::make_unique<AsyncTracker>(sched_config);
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_bytes, async_tracker.get(), latency_estimator.get(),
sched_config);
TF_ASSIGN_OR_RETURN(
value, LatencyHidingScheduler(std::move(latency_estimator),
std::move(async_tracker),
std::move(scheduler_core), shape_size_bytes)
.Run(module));
return value;
}
}
class LatencyHidingSchedulerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> ParseHloText(
absl::string_view hlo_string) {
TF_ASSIGN_OR_RETURN(
auto hlo_module,
ParseAndReturnVerifiedModule(hlo_string, GetModuleConfigForTest()));
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(hlo_module));
}
};
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncSimple) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(
f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[16,256,256] all-gather-done(
(f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, c0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 1);
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[1,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done),
metadata={op_type="Bitcast" op_name="ag0"}
%ag-start.2 = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done.2 = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
%ag-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done.2),
metadata={op_type="Bitcast" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllGather" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllGather" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ag-done-bc.2, %ag-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncReshaped) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[1,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done),
metadata={op_type="Bitcast" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done-bc, c0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 2);
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kBitcast,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 1);
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, %ag-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
c0 = f32[16,256,256]{2,1,0} convolution(ag-done, ag-done.2),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%c0, %c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped3) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
c0 = f32[16,256,256]{2,1,0} convolution(ag-done, ag-done.2),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, %ag-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllReduceAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %a = f32[] add(p0, p1)
}
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[2,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[2,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ar-start = f32[2,8,256,256] all-reduce-start(
f32[2,8,256,256] %color_operand.1), replica_groups={{0,1}}, to_apply=%add,
metadata={op_type="AllReduce" op_name="ar0"}
%ar-start.2 = f32[2,8,256,256] all-reduce-start(
f32[2,8,256,256] %color_operand.2), replica_groups={{0,1}}, to_apply=%add,
metadata={op_type="AllReduce" op_name="ar1"}
%ar-done = f32[2,8,256,256] all-reduce-done(
f32[2,8,256,256] %ar-start),
metadata={op_type="AllReduce" op_name="ar0"}
%ar-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ar-done),
metadata={op_type="Bitcast" op_name="ar0"}
%ar-done.2 = f32[2,8,256,256] all-reduce-done(
f32[2,8,256,256] %ar-start.2),
metadata={op_type="AllReduce" op_name="ar1"}
%ar-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ar-done.2),
metadata={op_type="Bitcast" op_name="ar1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllReduce" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllReduce" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ar-done-bc.2, %ar-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceDone,
new_instruction_sequence, "ar0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceStart,
new_instruction_sequence, "ar0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceDone,
new_instruction_sequence, "ar1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceStart,
new_instruction_sequence, "ar1"));
}
TEST_F(LatencyHidingSchedulerTest, WhileLoopAliasingBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[8]{0} collective-permute(collective-permute.1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT add = bf16[8]{0} add(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* while_body = hlo_module->GetComputationWithName("while_body");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(while_body).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* cp_start =
while_body->root_instruction()->operand(0)->operand(0);
EXPECT_EQ(cp_start->opcode(), HloOpcode::kCollectivePermuteStart);
EXPECT_LT(GetIndex(new_instruction_sequence, "add0"),
GetIndex(new_instruction_sequence, cp_start->name()));
}
TEST_F(LatencyHidingSchedulerTest, WhileLoopAliasingBug2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = bf16[8]{0} get-tuple-element(param), index=1 |
1,876 | cpp | tensorflow/tensorflow | result_caster | third_party/xla/xla/service/result_caster.cc | third_party/xla/xla/service/result_caster_test.cc | #ifndef XLA_SERVICE_RESULT_CASTER_H_
#define XLA_SERVICE_RESULT_CASTER_H_
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
#include "xla/util.h"
namespace xla {
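// Rewrites dot/convolution ops whose annotated result type differs from the inferred one: the op is computed in the inferred type and then converted back.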
class ResultCaster : public OpExpanderPass {
public:
explicit ResultCaster(HloPredicate extra_filter = nullptr)
: OpExpanderPass(std::move(extra_filter)) {}
absl::string_view name() const override { return "result_caster"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/result_caster.h"
#include <optional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
namespace xla {
namespace {
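// Infers the result shape from the operands for dot and convolution; returns nullopt for any other opcode.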
absl::StatusOr<std::optional<Shape>> MaybeInferShape(
const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kDot:
return ShapeInference::InferDotOpShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->dot_dimension_numbers(),
/*preferred_element_type=*/std::nullopt,
Cast<HloDotInstruction>(instruction)->sparsity());
case HloOpcode::kConvolution:
return ShapeInference::InferConvolveShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->feature_group_count(), instruction->batch_group_count(),
instruction->window(), instruction->convolution_dimension_numbers(),
/*preferred_element_type=*/std::nullopt);
default:
return std::optional<Shape>(std::nullopt);
}
}
}
bool ResultCaster::InstructionMatchesPattern(HloInstruction* instruction) {
auto status_or_inferred_shape = MaybeInferShape(instruction);
if (!status_or_inferred_shape.ok() ||
!status_or_inferred_shape->has_value()) {
return false;
}
const Shape& inferred_shape = status_or_inferred_shape.value().value();
return inferred_shape.element_type() != instruction->shape().element_type();
}
absl::StatusOr<HloInstruction*> ResultCaster::ExpandInstruction(
HloInstruction* instruction) {
auto* computation = instruction->parent();
Shape inferred_shape = MaybeInferShape(instruction).value().value();
*inferred_shape.mutable_layout() = instruction->shape().layout();
auto clone = computation->AddInstruction(
instruction->CloneWithNewShape(inferred_shape));
return computation->AddInstruction(
HloInstruction::CreateConvert(instruction->shape(), clone));
}
} | #include "xla/service/result_caster.h"
#include <memory>
#include <tuple>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class ResultCasterTest
: public HloTestBase,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>> {};
TEST_P(ResultCasterTest, CastResultWhenNeeded) {
PrimitiveType lhs_type, rhs_type, result_type;
std::tie(lhs_type, rhs_type, result_type) = GetParam();
absl::string_view module_tmpl = R"(
HloModule module
ENTRY main {
p0 = $0[2,3]{1,0} parameter(0)
p1 = $1[3,2]{1,0} parameter(1)
ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
auto module_string = absl::Substitute(
module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
primitive_util::LowercasePrimitiveTypeName(rhs_type),
primitive_util::LowercasePrimitiveTypeName(result_type));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool casted, ResultCaster().Run(module.get()));
const PrimitiveType accumulation_type =
primitive_util::HigherPrecisionType(lhs_type, rhs_type);
const bool should_cast = result_type != accumulation_type;
EXPECT_EQ(casted, should_cast);
auto lhs = op::Parameter(0);
auto rhs = op::Parameter(1);
auto original_shape_str = absl::Substitute(
"$0[2,2]{1,0}", primitive_util::LowercasePrimitiveTypeName(result_type));
auto accumulation_shape_str = absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(accumulation_type));
if (should_cast) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Convert(AllOf(op::Dot(lhs, rhs),
op::Shape(accumulation_shape_str))),
op::Shape(original_shape_str)));
} else {
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(lhs, rhs), op::Shape(original_shape_str)));
}
}
INSTANTIATE_TEST_SUITE_P(All, ResultCasterTest,
::testing::Values(std::make_tuple(BF16, BF16, S32),
std::make_tuple(F32, F32, S32),
std::make_tuple(F32, BF16, F32)));
TEST_F(ResultCasterTest, SparseDot) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
p0 = bf16[2,16]{1,0} parameter(0)
p1 = bf16[32,2]{1,0} parameter(1)
meta = u16[2,2]{1,0} parameter(2)
ROOT dot = f32[2,2]{1,0} dot(p0, p1, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool casted, ResultCaster().Run(module.get()));
EXPECT_TRUE(casted);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Convert(::testing::MakeMatcher(new ::xla::testing::HloMatcher(
HloOpcode::kDot,
{op::Parameter(0), op::Parameter(1), op::Parameter(2)}))));
}
}
} |
1,877 | cpp | tensorflow/tensorflow | reduce_window_rewriter | third_party/xla/xla/service/reduce_window_rewriter.cc | third_party/xla/xla/service/reduce_window_rewriter_test.cc | #ifndef XLA_SERVICE_REDUCE_WINDOW_REWRITER_H_
#define XLA_SERVICE_REDUCE_WINDOW_REWRITER_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
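// Rewrites reduce-window ops: rank-1 reduce-windows are promoted to rank 2,
// and long cumulative sum/product style scans are split into a two-level,
// tiled scan using tiles of size base_length.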
class ReduceWindowRewriter : public HloModulePass {
public:
explicit ReduceWindowRewriter(int64_t base_length)
: base_length_(base_length) {}
absl::string_view name() const override { return "reduce-window-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::Status ReplaceReduceWindowWithReshape(
HloReduceWindowInstruction* reduce_window);
absl::StatusOr<bool> TryOptimizeCumSumOrProd(
HloReduceWindowInstruction* reduce_window);
int64_t base_length_;
};
}
#endif
#include "xla/service/reduce_window_rewriter.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
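// Helpers for addressing (possibly variadic) reduce-window results: a shape
// index here is either empty or a single tuple-element index.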
static size_t FlattenShapeIndex(const ShapeIndex& shape_index) {
if (shape_index.empty()) {
return 0;
}
CHECK_EQ(shape_index.size(), 1);
return shape_index.back();
}
static Shape ShapeAtIndex(const Shape& shape, const ShapeIndex& shape_index) {
if (shape_index.empty()) {
return shape;
}
CHECK_EQ(shape_index.size(), 1);
return ShapeUtil::GetTupleElementShape(shape, shape_index.back());
}
static HloInstruction* GetAtIndex(HloInstruction* hlo,
const ShapeIndex& shape_index) {
if (shape_index.empty()) {
return hlo;
}
CHECK_EQ(shape_index.size(), 1);
return hlo->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeAtIndex(hlo->shape(), shape_index), hlo, shape_index.back()));
}
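// Promotes a rank-1 reduce-window to rank 2 by appending a trivial major
// dimension to each operand and result, then reshapes every result back to
// rank 1.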
absl::Status ReduceWindowRewriter::ReplaceReduceWindowWithReshape(
HloReduceWindowInstruction* reduce_window) {
VLOG(2) << "Converting R1 reduce window: " << reduce_window->ToString();
std::vector<Shape> r2_output_shapes;
ShapeUtil::ForEachSubshape(
reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(reduce_window->shape(), shape_index)) {
return;
}
Shape r2_output_shape = subshape;
ShapeUtil::AppendMajorDimension(1, &r2_output_shape);
UpdateLayout(&r2_output_shape);
r2_output_shapes.push_back(r2_output_shape);
VLOG(2) << "ReduceWindowRewriter: Converting R2 result to R1: "
<< ShapeUtil::HumanStringWithLayout(r2_output_shape);
});
Window r2_window = reduce_window->window();
WindowDimension* dim = r2_window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
std::vector<HloInstruction*> r2_operands;
for (HloInstruction* operand : reduce_window->inputs()) {
Shape r2_input_shape = operand->shape();
ShapeUtil::AppendMajorDimension(1, &r2_input_shape);
UpdateLayout(&r2_input_shape);
VLOG(2) << "ReduceWindowRewriter: Converting R1 operand to R2: "
<< ShapeUtil::HumanStringWithLayout(r2_input_shape);
HloInstruction* r2_operand = operand->parent()->AddInstruction(
HloInstruction::CreateReshape(r2_input_shape, operand));
VLOG(2) << "R2 new operand: " << r2_operand->ToString();
r2_operands.push_back(r2_operand);
}
HloInstruction* new_reduce_window = reduce_window->parent()->AddInstruction(
HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(r2_output_shapes)
: r2_output_shapes[0],
r2_operands, reduce_window->init_values(), r2_window,
reduce_window->to_apply()));
VLOG(2) << "R2 resulting reduce window: " << new_reduce_window->ToString();
std::vector<HloInstruction*> final_reshapes;
ShapeUtil::ForEachSubshape(
reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(reduce_window->shape(), shape_index)) {
return;
}
HloInstruction* final_reshape =
new_reduce_window->parent()->AddInstruction(
HloInstruction::CreateReshape(
subshape, GetAtIndex(new_reduce_window, shape_index)));
final_reshapes.push_back(final_reshape);
});
HloInstruction* result;
if (reduce_window->shape().IsTuple()) {
result = new_reduce_window->parent()->AddInstruction(
HloInstruction::CreateTuple(final_reshapes));
} else {
CHECK_EQ(final_reshapes.size(), 1);
result = final_reshapes[0];
}
TF_RETURN_IF_ERROR(reduce_window->ReplaceAllUsesWith(result));
TF_RETURN_IF_ERROR(
new_reduce_window->parent()->RemoveInstruction(reduce_window));
return absl::OkStatus();
}
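// Detects reduce-windows that implement a cumulative sum/product (a single
// non-trivial window dimension whose window spans the whole dimension). When
// the scan is longer than base_length_, rewrites it as a tiled scan: running
// totals within each tile (outer scan), a scan over the per-tile totals
// (inner scan), and a map that combines the two.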
absl::StatusOr<bool> ReduceWindowRewriter::TryOptimizeCumSumOrProd(
HloReduceWindowInstruction* reduce_window) {
const Shape& operand_shape = reduce_window->inputs().front()->shape();
int64_t rank = operand_shape.rank();
const Window& window = reduce_window->window();
int64_t scan_dim_num = -1;
for (int i = 0; i < rank; ++i) {
const WindowDimension& window_dim = window.dimensions(i);
if (window_util::IsTrivialWindowDimension(window_dim)) {
continue;
}
if (scan_dim_num != -1) {
return false;
}
scan_dim_num = i;
}
if (scan_dim_num == -1) {
return false;
}
const int64_t scan_length = operand_shape.dimensions(scan_dim_num);
absl::Span<HloInstruction* const> init_values = reduce_window->init_values();
const WindowDimension& scan_window_dim = window.dimensions(scan_dim_num);
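// An inclusive scan pads the scan dimension by scan_length - 1 on one side;
// an exclusive scan pads by the full scan_length.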
bool forward_scan = (scan_window_dim.padding_low() == scan_length - 1 ||
scan_window_dim.padding_low() == scan_length) &&
scan_window_dim.padding_high() == 0;
bool reverse_scan = (scan_window_dim.padding_high() == scan_length - 1 ||
scan_window_dim.padding_high() == scan_length) &&
scan_window_dim.padding_low() == 0;
if (scan_window_dim.stride() != 1 || scan_window_dim.size() != scan_length ||
(!forward_scan && !reverse_scan) || scan_window_dim.window_reversal() ||
scan_window_dim.base_dilation() != 1 ||
scan_window_dim.window_dilation() != 1) {
return false;
}
bool is_exclusive = forward_scan
? (scan_window_dim.padding_low() == scan_length)
: (scan_window_dim.padding_high() == scan_length);
if (scan_length <= base_length_) {
return false;
}
if (reduce_window->to_apply()->root_instruction()->shape().IsTuple() &&
reduce_window->to_apply()->root_instruction()->opcode() !=
HloOpcode::kTuple) {
return false;
}
VLOG(2) << "Rewriting Scan: " << reduce_window->ToString();
HloComputation* parent = reduce_window->parent();
std::vector<HloInstruction*> sources(reduce_window->inputs().begin(),
reduce_window->inputs().end());
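// Swap the scan dimension with the last dimension so that the tiling below
// operates on the last dimension.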
std::vector<int64_t> permutation(rank);
absl::c_iota(permutation, 0);
permutation[scan_dim_num] = rank - 1;
permutation[rank - 1] = scan_dim_num;
if (scan_dim_num != rank - 1) {
for (size_t i = 0; i < sources.size(); ++i) {
sources[i] = parent->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(permutation, sources[i]->shape()),
sources[i], permutation));
}
}
const int64_t padded_length = RoundUpTo(scan_length, base_length_);
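// Pad the scan dimension with the init value up to a multiple of base_length_.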
if (scan_length != padded_length) {
for (size_t i = 0; i < sources.size(); ++i) {
auto* source = sources[i];
Shape padded_shape = source->shape();
padded_shape.set_dimensions(rank - 1, padded_length);
UpdateLayout(&padded_shape);
auto padding_config = MakeNoPaddingConfig(rank);
padding_config.mutable_dimensions(rank - 1)->set_edge_padding_high(
padded_length - scan_length);
sources[i] = parent->AddInstruction(HloInstruction::CreatePad(
padded_shape, source, init_values[i], padding_config));
}
}
const int64_t num_columns = padded_length / base_length_;
std::vector<HloInstruction*> tiled_sources;
std::vector<Shape> tiled_shapes;
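// Tile: reshape each padded source so the scan dimension is split into
// num_columns tiles with an appended dimension of size base_length_.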
for (size_t i = 0; i < sources.size(); ++i) {
auto* source = sources[i];
Shape tiled_shape = source->shape();
tiled_shape.set_dimensions(rank - 1, num_columns);
UpdateLayout(&tiled_shape);
ShapeUtil::AppendMajorDimension(base_length_, &tiled_shape);
tiled_shapes.push_back(tiled_shape);
tiled_sources.push_back(parent->AddInstruction(
HloInstruction::CreateReshape(tiled_shape, source)));
}
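// Outer scan: a reduce-window of size base_length_ over the tile dimension
// produces running totals within each tile.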
Window outer_window =
window_util::MakeWindow(std::vector<int64_t>(rank + 1, 1));
outer_window.mutable_dimensions(rank)->set_size(base_length_);
if (forward_scan) {
outer_window.mutable_dimensions(rank)->set_padding_low(base_length_ - 1);
} else {
outer_window.mutable_dimensions(rank)->set_padding_high(base_length_ - 1);
}
auto outer_reduce_window =
parent->AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(tiled_shapes)
: tiled_shapes[0],
tiled_sources, init_values, outer_window, reduce_window->to_apply()));
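// For each tile, slice out the element holding the tile's total (the last
// element for a forward scan, the first for a reverse scan) to feed the
// inner scan.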
std::vector<Shape> column_shapes;
std::vector<HloInstruction*> last_cols;
ShapeUtil::ForEachSubshape(
outer_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return;
}
Shape column_shape = subshape;
column_shape.set_dimensions(rank, 1);
UpdateLayout(&column_shape);
std::vector<int64_t> col_slice_starts(rank + 1, 0);
std::vector<int64_t> col_slice_limits(
SpanToVector(subshape.dimensions()));
if (forward_scan) {
col_slice_starts[rank] = base_length_ - 1;
} else {
col_slice_limits[rank] = 1;
}
auto last_col = parent->AddInstruction(HloInstruction::CreateSlice(
column_shape, GetAtIndex(outer_reduce_window, shape_index),
col_slice_starts, col_slice_limits,
std::vector<int64_t>(rank + 1, 1)));
column_shape.DeleteDimension(rank);
last_col = parent->AddInstruction(
HloInstruction::CreateReshape(column_shape, last_col));
last_cols.push_back(last_col);
column_shape.set_dimensions(rank - 1, num_columns + 1);
UpdateLayout(&column_shape);
column_shapes.push_back(column_shape);
});
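// Inner scan: a cumulative reduce-window over the per-tile totals; together
// with the slice below this yields, for each tile, the reduction of all
// totals strictly before it (forward scan) or strictly after it (reverse
// scan).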
Window inner_window = window_util::MakeWindow(std::vector<int64_t>(rank, 1));
inner_window.mutable_dimensions(rank - 1)->set_size(num_columns);
if (forward_scan) {
inner_window.mutable_dimensions(rank - 1)->set_padding_low(num_columns);
} else {
inner_window.mutable_dimensions(rank - 1)->set_padding_high(num_columns);
}
auto inner_reduce_window =
parent->AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(column_shapes)
: column_shapes[0],
last_cols, init_values, inner_window, reduce_window->to_apply()));
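// Keep the exclusive prefix of the inner scan (drop the last column for a
// forward scan, the first for a reverse scan) and broadcast it across each
// tile.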
std::vector<int64_t> exclusive_slice_starts(rank, 0);
std::vector<int64_t> exclusive_slice_limits =
SpanToVector(column_shapes[0].dimensions());
if (forward_scan) {
exclusive_slice_limits[rank - 1] = num_columns;
} else {
exclusive_slice_starts[rank - 1] = 1;
exclusive_slice_limits[rank - 1] = num_columns + 1;
}
std::vector<HloInstruction*> inner_scan_components;
ShapeUtil::ForEachSubshape(
inner_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(inner_reduce_window->shape(),
shape_index)) {
return;
}
size_t idx = FlattenShapeIndex(shape_index);
auto last_col = last_cols[idx];
auto* inner_slice = parent->AddInstruction(HloInstruction::CreateSlice(
last_col->shape(), GetAtIndex(inner_reduce_window, shape_index),
exclusive_slice_starts, exclusive_slice_limits,
std::vector<int64_t>(rank, 1)));
std::vector<int64_t> rank_iota(rank);
absl::c_iota(rank_iota, 0);
auto* inner_scan_component =
parent->AddInstruction(HloInstruction::CreateBroadcast(
tiled_shapes[idx], inner_slice, rank_iota));
inner_scan_components.push_back(inner_scan_component);
});
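// Combine: map the per-tile running totals with the broadcast tile prefixes
// using the original reduction computation, then undo the tiling, transpose
// and padding to recover the original shape (adding edge padding again for
// exclusive scans).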
std::vector<HloInstruction*> map_operands;
ShapeUtil::ForEachSubshape(
outer_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return;
}
map_operands.push_back(GetAtIndex(outer_reduce_window, shape_index));
});
map_operands.insert(map_operands.end(), inner_scan_components.begin(),
inner_scan_components.end());
std::vector<HloInstruction*> scans;
auto status = ShapeUtil::ForEachSubshapeWithStatus(
outer_reduce_window->shape(),
[&](const Shape& subshape,
const ShapeIndex& shape_index) -> absl::Status {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return absl::OkStatus();
}
size_t idx = FlattenShapeIndex(shape_index);
auto source = sources[idx];
HloComputation* map_computation;
auto reduce_function_root =
reduce_window->to_apply()->root_instruction();
if (reduce_function_root->shape().IsTuple()) {
TF_RET_CHECK(reduce_function_root->opcode() == HloOpcode::kTuple);
auto* map_computation_root = reduce_function_root->operand(idx);
absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<HloInstruction>>
replacements;
replacements[reduce_function_root] = nullptr;
map_computation = parent->parent()->AddEmbeddedComputation(
reduce_window->to_apply()->CloneWithReplacements(
&replacements,
/*extra_parameters=*/{}, /*context=*/nullptr, /*suffix=*/"clone",
map_computation_root));
} else {
map_computation = reduce_window->to_apply();
}
auto scan = parent->AddInstruction(HloInstruction::CreateMap(
ShapeAtIndex(outer_reduce_window->shape(), shape_index),
map_operands, map_computation));
scan = parent->AddInstruction(
HloInstruction::CreateReshape(source->shape(), scan));
if (scan_dim_num != rank - 1) {
scan = parent->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(permutation, source->shape()), scan,
permutation));
}
if (padded_length != scan_length) {
scan = parent->AddInstruction(HloInstruction::CreateSlice(
operand_shape, scan, std::vector<int64_t>(rank, 0),
operand_shape.dimensions(), std::vector<int64_t>(rank, 1)));
}
if (is_exclusive) {
auto padding_config = MakeNoPaddingConfig(rank);
if (forward_scan) {
padding_config.mutable_dimensions(scan_dim_num)
->set_edge_padding_low(1);
} else {
padding_config.mutable_dimensions(scan_dim_num)
->set_edge_padding_high(1);
}
scan = parent->AddInstruction(HloInstruction::CreatePad(
ShapeAtIndex(reduce_window->shape(), shape_index), scan,
init_values[idx], padding_config));
}
scans.push_back(scan);
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
HloInstruction* scan;
if (reduce_window->shape().IsTuple()) {
scan = parent->AddInstruction(HloInstruction::CreateTuple(scans));
} else {
CHECK_EQ(scans.size(), 1);
scan = scans[0];
}
TF_RETURN_IF_ERROR(reduce_window->ReplaceAllUsesWith(scan));
TF_RETURN_IF_ERROR(parent->RemoveInstruction(reduce_window));
return true;
}
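// Attempts the tiled-scan rewrite on every reduce-window; any remaining
// rank-1 reduce-windows are promoted to rank 2.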
absl::StatusOr<bool> ReduceWindowRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const auto& computation : module->computations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
HloReduceWindowInstruction* reduce_window =
DynCast<HloReduceWindowInstruction>(instruction);
if (!reduce_window) {
continue;
}
TF_ASSIGN_OR_RETURN(bool made_change,
TryOptimizeCumSumOrProd(reduce_window));
if (made_change) {
changed = true;
continue;
}
if (reduce_window->inputs().front()->shape().rank() != 1) {
continue;
}
TF_RETURN_IF_ERROR(ReplaceReduceWindowWithReshape(reduce_window));
changed = true;
}
}
return changed;
}
} | #include "xla/service/reduce_window_rewriter.h"
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class ReduceWindowRewriterTest : public HloTestBase {
public:
void CheckReduceWindowRewrite(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, ReduceWindowRewriter{128}, expected);
}
};
TEST_F(ReduceWindowRewriterTest, EliminateR1) {
const char* hlo = R"(
%binary_add {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %a, f32[] %b)
}
ENTRY %EliminateR1 (input: f32[10]) -> f32[10] {
%input = f32[10]{0} parameter(0)
%constant = f32[] constant(0)
ROOT %reduce-window = f32[10]{0} reduce-window(f32[10]{0} %input, f32[] %constant), window={size=5 pad=2_2}, to_apply=%binary_add
}
)";
CheckReduceWindowRewrite(hlo, R"(
)");
}
TEST_F(ReduceWindowRewriterTest, EliminateR1Variadic) {
const char* hlo = R"(
HloModule reduce-window
add_float {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT root = (f32[], f32[]) tuple(sum.0, sum.1)
}
ENTRY entry (arg: f32[10]) -> (f32[10], f32[10]) {
arg = f32[10]{0} parameter(0)
constant = f32[] constant(0)
ROOT reduce-window = (f32[10]{0}, f32[10]{0}) reduce-window(f32[10]{0} %arg, f32[10]{0} %arg, f32[] %constant, f32[] %constant), window={size=5 pad=2_2}, to_apply=%add_float
})";
CheckReduceWindowRewrite(hlo, R"(
)");
}
TEST_F(ReduceWindowRewriterTest, OptimizeR1InclusiveScan) {
const char* hlo = R"(
HloModule reduce-window
add_float {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY entry (arg: f32[46592]) -> f32[46592] {
arg = f32[46592]{0} parameter(0)
constant = f32[] constant(0)
ROOT reduce-window = f32[46592]{0} reduce-window(f32[46592]{0} %arg, f32[] %constant), window={size=46592 pad=46591_0}, to_apply=%add_float
})";
CheckReduceWindowRewrite(hlo, R"(
)");
}
TEST_F(ReduceWindowRewriterTest, OptimizeR1InclusiveScanVariadic) {
const std::string hlo_string = R"(
HloModule reduce-window
MaxMin {
l.max = f32[] parameter(0)
l.min = f32[] parameter(1)
r.max = f32[] parameter(2)
r.min = f32[] parameter(3)
max = f32[] maximum(l.max, r.max)
min = f32[] minimum(l.min, r.min)
ROOT root = (f32[], f32[]) tuple(max, min)
}
ENTRY entry (arg_0: f32[46592], arg_1: f32[46592]) -> (f32[46592], f32[46592]) {
arg.0 = f32[46592]{0} parameter(0)
arg.1 = f32[46592]{0} parameter(1)
init_ninf = f32[] constant(-inf)
init_inf = f32[] constant(inf)
ROOT reduce-window = (f32[46592]{0}, f32[46592]{0}) reduce-window(f32[46592]{0} %arg.0, f32[46592]{0} %arg.1, f32[] %init_ninf, f32[] %init_inf), window={size=46592 pad=46591_0}, to_apply=%MaxMin
}
)";
CheckReduceWindowRewrite(hlo_string, R"(
)");
}
}
} |
1,878 | cpp | tensorflow/tensorflow | hlo_ordering | third_party/xla/xla/service/hlo_ordering.cc | third_party/xla/xla/service/hlo_ordering_test.cc | #ifndef XLA_SERVICE_HLO_ORDERING_H_
#define XLA_SERVICE_HLO_ORDERING_H_
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/types.h"
namespace xla {
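// Base class for orderings over HLO instructions and values. It answers
// executes-before, defined-before, uses-before-definition and may-interfere
// queries used when reasoning about live ranges.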
class HloOrdering {
public:
explicit HloOrdering(const HloModule* module)
: module_(module), call_graph_(CallGraph::Build(module)) {}
virtual ~HloOrdering() = default;
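// Relative execution constraint between two instructions, which may live in
// different computations.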
enum class ExecutionConstraint {
kIsSame,
kRunBeforeStart,
kRunBeforeEnd,
kRunExclusiveBefore,
kRunExclusiveAfter,
kRunAfter,
kUnordered,
};
HloOrdering::ExecutionConstraint GetExecutionConstraint(
const HloInstruction* a, const HloInstruction* b) const;
bool ExecutesBefore(const HloInstruction* a, const HloInstruction* b) const;
bool IsDefinedBefore(const HloValue& a, const HloValue& b) const;
bool UsesBeforeValueDefinition(
absl::Span<const HloUse* const> uses, const HloValue& value,
const HloDataflowAnalysis& dataflow,
bool use_is_always_before_def_in_same_instr = false) const;
bool MayInterfere(const HloValue& a, const HloValue& b,
const HloDataflowAnalysis& dataflow) const;
bool LiveRangeStrictlyBefore(
const HloValue& a, const HloValue& b, const HloDataflowAnalysis& dataflow,
bool use_is_always_before_def_in_same_instr = false) const;
virtual const HloInstructionSequence* SequentialOrder(
const HloComputation& computation) const = 0;
const CallGraph& call_graph() const { return *call_graph_; }
virtual std::string ToString() const = 0;
protected:
virtual bool ExecutesBeforeInSameComputation(
const HloInstruction* a, const HloInstruction* b) const = 0;
const HloModule* module_;
std::unique_ptr<CallGraph> call_graph_;
};
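// Ordering in which one instruction executes before another only if it is a
// transitive predecessor within the same computation, according to a
// reachability map built per computation. Provides no sequential order.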
class PredecessorHloOrdering : public HloOrdering {
public:
~PredecessorHloOrdering() override = default;
const HloInstructionSequence* SequentialOrder(
const HloComputation& computation) const override {
return nullptr;
}
HloReachabilityMap& reachability_map(const HloComputation* computation) {
return *predecessors_.at(computation);
}
const HloReachabilityMap& reachability_map(
const HloComputation* computation) const {
return *predecessors_.at(computation);
}
protected:
explicit PredecessorHloOrdering(const HloModule* module);
std::string ToStringHelper(const std::string& name) const;
bool ExecutesBeforeInSameComputation(const HloInstruction* a,
const HloInstruction* b) const override;
absl::flat_hash_map<const HloComputation*,
std::unique_ptr<HloReachabilityMap>>
predecessors_;
};
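// PredecessorHloOrdering derived from the module's data and control
// dependencies.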
class DependencyHloOrdering : public PredecessorHloOrdering {
public:
explicit DependencyHloOrdering(const HloModule* module);
~DependencyHloOrdering() override = default;
std::string ToString() const override;
};
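// Ordering induced by a total per-computation instruction sequence taken from
// an HloSchedule.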
class SequentialHloOrdering : public HloOrdering {
public:
explicit SequentialHloOrdering(const HloSchedule& schedule);
explicit SequentialHloOrdering(HloSchedule&& schedule);
~SequentialHloOrdering() override = default;
const HloInstructionSequence* SequentialOrder(
const HloComputation& computation) const override;
std::string ToString() const override;
protected:
void Initialize();
bool ExecutesBeforeInSameComputation(const HloInstruction* a,
const HloInstruction* b) const override;
const HloSchedule schedule_;
absl::flat_hash_map<const HloInstruction*, int> order_position_;
};
}
#endif
#include "xla/service/hlo_ordering.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
bool HloOrdering::ExecutesBefore(const HloInstruction* a,
const HloInstruction* b) const {
switch (GetExecutionConstraint(a, b)) {
case ExecutionConstraint::kIsSame:
return false;
case ExecutionConstraint::kRunBeforeStart:
case ExecutionConstraint::kRunBeforeEnd:
case ExecutionConstraint::kRunExclusiveBefore:
return true;
case ExecutionConstraint::kRunExclusiveAfter:
case ExecutionConstraint::kRunAfter:
case ExecutionConstraint::kUnordered:
return false;
}
}
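// Relates a and b through their nearest ancestors in a common computation. An
// async op and its wrapped instruction are treated as the same instruction;
// while ops order the condition before the body, and conditional branches
// are mutually exclusive.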
HloOrdering::ExecutionConstraint HloOrdering::GetExecutionConstraint(
const HloInstruction* a, const HloInstruction* b) const {
auto is_async_wrapped = [](const HloInstruction* a, const HloInstruction* b) {
return a->IsAsynchronous() && a->async_wrapped_instruction() == b;
};
if (a == b || is_async_wrapped(a, b) || is_async_wrapped(b, a)) {
return ExecutionConstraint::kIsSame;
}
const HloInstruction* a_ancestor;
const HloInstruction* b_ancestor;
std::tie(a_ancestor, b_ancestor) =
call_graph_->NearestAncestorsInSameComputation(
const_cast<HloInstruction*>(a), const_cast<HloInstruction*>(b));
if (a_ancestor == nullptr) {
VLOG(4) << "Ancestors in a common computation could not be found between "
<< a->ToString() << "\n and \n"
<< b->ToString() << "\n so consider them to be unordered.\n";
return ExecutionConstraint::kUnordered;
}
CHECK_NE(b_ancestor, nullptr);
CHECK_EQ(a_ancestor->parent(), b_ancestor->parent());
if (a_ancestor == b_ancestor && a_ancestor->opcode() == HloOpcode::kWhile) {
const HloComputation* body = a_ancestor->while_body();
const HloComputation* condition = a_ancestor->while_condition();
if (call_graph_->InstructionIsNestedIn(a, condition) &&
call_graph_->InstructionIsNestedIn(b, body)) {
return ExecutionConstraint::kRunBeforeEnd;
}
}
if (a_ancestor == b_ancestor &&
(a_ancestor->opcode() == HloOpcode::kConditional)) {
int a_branch = -1;
int b_branch = -1;
for (int j = 0; j < a_ancestor->branch_count(); ++j) {
if (call_graph_->InstructionIsNestedIn(
a, a_ancestor->branch_computation(j))) {
a_branch = j;
}
if (call_graph_->InstructionIsNestedIn(
b, a_ancestor->branch_computation(j))) {
b_branch = j;
}
}
if (a_branch == -1 && b_branch == -1) {
CHECK_EQ(a, a_ancestor);
CHECK_EQ(b, b_ancestor);
CHECK_EQ(a, b);
return ExecutionConstraint::kIsSame;
}
if (b_branch == -1) {
CHECK_EQ(b, a_ancestor);
return ExecutionConstraint::kRunBeforeEnd;
}
if (a_branch == -1) {
CHECK_EQ(a, a_ancestor);
return ExecutionConstraint::kRunAfter;
}
if (a_branch < b_branch) {
return ExecutionConstraint::kRunExclusiveBefore;
}
if (b_branch < a_branch) {
return ExecutionConstraint::kRunExclusiveAfter;
}
}
if (ExecutesBeforeInSameComputation(a_ancestor, b_ancestor)) {
return ExecutionConstraint::kRunBeforeStart;
}
if (ExecutesBeforeInSameComputation(b_ancestor, a_ancestor)) {
return ExecutionConstraint::kRunAfter;
}
VLOG(1) << "Cannot determine order between:" << a->ToString() << "\n"
<< "and " << b->ToString() << " which are in the same computation\n";
return ExecutionConstraint::kUnordered;
}
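// Entry parameters are defined before everything else. Phi values introduced
// by while and conditional ops, and the parameters of their called
// computations, are ordered specially before falling back to ExecutesBefore
// on the defining instructions.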
bool HloOrdering::IsDefinedBefore(const HloValue& a, const HloValue& b) const {
const HloModule* module = b.defining_instruction()->GetModule();
if (b.defining_instruction()->parent() == module->entry_computation() &&
b.defining_instruction()->opcode() == HloOpcode::kParameter) {
return false;
}
if (a.defining_instruction()->parent() == module->entry_computation() &&
a.defining_instruction()->opcode() == HloOpcode::kParameter) {
return true;
}
auto is_body_or_condition_phi = [](const HloValue& v) {
return v.is_phi() &&
v.defining_instruction()->opcode() == HloOpcode::kParameter;
};
if (is_body_or_condition_phi(a) && !is_body_or_condition_phi(b) &&
call_graph_->InstructionIsNestedIn(b.defining_instruction(),
a.defining_instruction()->parent())) {
return true;
}
if (is_body_or_condition_phi(b) &&
call_graph_->InstructionIsNestedIn(a.defining_instruction(),
b.defining_instruction()->parent())) {
return false;
}
if (b.is_phi() && b.defining_instruction()->opcode() == HloOpcode::kWhile &&
(call_graph_->InstructionIsNestedIn(
a.defining_instruction(), b.defining_instruction()->while_body()) ||
call_graph_->InstructionIsNestedIn(
a.defining_instruction(),
b.defining_instruction()->while_condition()))) {
return true;
}
if (b.is_phi() &&
b.defining_instruction()->opcode() == HloOpcode::kConditional) {
for (int j = 0; j < b.defining_instruction()->branch_count(); ++j) {
if (call_graph_->InstructionIsNestedIn(
a.defining_instruction(),
b.defining_instruction()->branch_computation(j))) {
return true;
}
}
}
return ExecutesBefore(a.defining_instruction(), b.defining_instruction());
}
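// Returns true only if every use in 'uses' can be shown to complete before
// 'value' is defined, which is what permits the corresponding buffers to be
// shared.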
bool HloOrdering::UsesBeforeValueDefinition(
absl::Span<const HloUse* const> uses, const HloValue& value,
const HloDataflowAnalysis& dataflow,
bool use_is_always_before_def_in_same_instr) const {
bool has_use_in_exclusive_branches = false;
bool has_escaped_use_in_conditional = false;
auto UseIsBeforeValueDefinition = [&](const HloUse& use) {
VLOG(4) << "UseIsBeforeValueDefinition(use=" << use
<< ", value=" << value.ToShortString() << ")";
switch (
GetExecutionConstraint(use.instruction, value.defining_instruction())) {
case HloOrdering::ExecutionConstraint::kIsSame: {
if (use_is_always_before_def_in_same_instr) {
return true;
}
HloInstruction* operand =
use.instruction->mutable_operand(use.operand_number);
HloInstruction* user = value.defining_instruction();
auto operand_index_ptr =
std::make_unique<ShapeIndex>(use.operand_index);
if (use.instruction->IsAsynchronous()) {
if (value.defining_instruction()->parent() ==
use.instruction->async_wrapped_computation()) {
if (use.instruction->opcode() == HloOpcode::kAsyncStart) {
operand = use.instruction->async_wrapped_computation()
->parameter_instruction(use.operand_number);
} else {
CHECK_GT(use.operand_index.size(), 1);
operand = use.instruction->async_wrapped_computation()
->parameter_instruction(use.operand_index.at(1));
operand_index_ptr = std::make_unique<ShapeIndex>(
absl::MakeSpan(use.operand_index)
.subspan(2, use.operand_index.size() - 2));
}
}
}
if (dataflow.CanShareOperandBufferWithUser(
operand,
*operand_index_ptr,
user,
value.defining_index())) {
VLOG(4)
<< " use is value def, and instruction can share use buffer.";
return true;
}
break;
}
case HloOrdering::ExecutionConstraint::kRunExclusiveAfter:
VLOG(4) << " use and value def are in exclusive branches.";
if (!has_escaped_use_in_conditional) {
has_use_in_exclusive_branches = true;
VLOG(4) << "Allowing them to share buffer.\n";
return true;
}
VLOG(4) << "value def has escaped use in conditional. \n";
break;
case HloOrdering::ExecutionConstraint::kRunExclusiveBefore:
case HloOrdering::ExecutionConstraint::kRunBeforeStart:
case HloOrdering::ExecutionConstraint::kRunBeforeEnd:
VLOG(4)
<< " use instruction executes before value-defining instruction";
return true;
case HloOrdering::ExecutionConstraint::kRunAfter:
if (use_is_always_before_def_in_same_instr &&
use.instruction->opcode() == HloOpcode::kCollectivePermuteDone &&
use.instruction->operand(0) == value.instruction()) {
return true;
}
break;
case HloOrdering::ExecutionConstraint::kUnordered:
break;
}
if (use.instruction->opcode() == HloOpcode::kWhile) {
const HloInstruction* xla_while = use.instruction;
if (call_graph_->InstructionIsNestedIn(value.defining_instruction(),
xla_while->while_body())) {
VLOG(4) << " use is while " << use.instruction->name()
<< " and def is in body";
return true;
}
if (call_graph_->InstructionIsNestedIn(value.defining_instruction(),
xla_while->while_condition())) {
if (value.defining_instruction() !=
xla_while->while_condition()->parameter_instruction(0)) {
VLOG(4) << " use is while " << use.instruction->name()
<< " and def is in condition and is not the parameter";
return false;
} else {
VLOG(4) << " use is while " << use.instruction->name()
<< " and def is in condition and is the parameter";
return true;
}
}
}
if (value.defining_instruction()->opcode() == HloOpcode::kWhile) {
CHECK(value.is_phi());
const HloInstruction* xla_while = value.defining_instruction();
if (call_graph_->InstructionIsNestedIn(use.instruction,
xla_while->while_body()) ||
call_graph_->InstructionIsNestedIn(use.instruction,
xla_while->while_condition())) {
VLOG(4) << " value is while " << value.defining_instruction()->name()
<< " and use is in condition or body";
return true;
}
}
if (use.instruction->opcode() == HloOpcode::kCall) {
const HloInstruction* call = use.instruction;
if (call_graph_->InstructionIsNestedIn(value.defining_instruction(),
call->to_apply())) {
VLOG(4) << " use is call " << use.instruction->name()
<< " and def is in called computation";
return true;
}
}
if (use.instruction->IsAsynchronous() &&
use.instruction->async_wrapped_opcode() == HloOpcode::kCall) {
const HloInstruction* async = use.instruction;
if (call_graph_->InstructionIsNestedIn(
value.defining_instruction(),
async->async_wrapped_instruction()->to_apply())) {
VLOG(4) << " use is async " << use.instruction->name()
<< " and def is in called computation";
return true;
}
}
if (use.instruction->opcode() == HloOpcode::kConditional) {
const HloInstruction* conditional = use.instruction;
for (int j = 0; j < conditional->branch_count(); ++j) {
if (call_graph_->InstructionIsNestedIn(
value.defining_instruction(),
conditional->branch_computation(j))) {
if (!dataflow.ValueIsDefinedAt(
use.instruction->operand(use.operand_number), {})) {
for (auto value_use : value.GetUses()) {
VLOG(4) << "def have use:" << value_use << "\n";
if (value_use.instruction ==
value_use.instruction->parent()->root_instruction()) {
VLOG(4) << "def use is conditional root \n";
has_escaped_use_in_conditional = true;
break;
}
}
}
if (!has_use_in_exclusive_branches) {
VLOG(4) << " use is conditional " << use.instruction->name()
<< " and def is in " << j << "th branch computation";
return true;
}
}
}
if (value.defining_instruction() == use.instruction) {
VLOG(4) << " use is conditional " << use << " and def is "
<< value.ToShortString();
return true;
}
}
VLOG(4) << " use is not before value definition";
return false;
};
for (auto* use : uses) {
if (!UseIsBeforeValueDefinition(*use)) {
return false;
}
}
return true;
}
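// a's live range ends strictly before b's begins: a must be defined before b,
// must not be live out where b could still execute, and every use of a must
// occur before b's definition.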
bool HloOrdering::LiveRangeStrictlyBefore(
const HloValue& a, const HloValue& b, const HloDataflowAnalysis& dataflow,
bool use_is_always_before_def_in_same_instr) const {
VLOG(4) << "LiveRangeStrictlyBefore(a = " << a.ToShortString()
<< ", b = " << b.ToShortString() << ")";
VLOG(4) << "Parent:" << a.instruction()->parent()->ToString() << "\n";
if (!IsDefinedBefore(a, b)) {
VLOG(4) << a << " not defined before " << b;
return false;
}
if (a.live_out_of_module()) {
VLOG(4) << a << " is live out of module and not defined before " << b;
return false;
}
for (const HloPosition& pos : a.positions()) {
if (pos.instruction->parent()->root_instruction() == pos.instruction &&
call_graph().InstructionIsNestedIn(b.instruction(),
pos.instruction->parent())) {
return false;
}
}
std::vector<const HloUse*> uses;
for (const HloUse& use : a.GetUses()) {
if (dataflow.DoesNotUseOperandBuffer(a.instruction(), a.index(),
use.instruction)) {
continue;
}
uses.push_back(&use);
}
if (!UsesBeforeValueDefinition(uses, b, dataflow,
use_is_always_before_def_in_same_instr)) {
VLOG(4) << "uses of " << a << " not before " << b << " is defined";
return false;
}
if (a.IsRootOf(b.instruction()->parent())) {
VLOG(4) << a << " is live out of computation and defined before " << b
<< " which is in same computation";
return false;
}
return true;
}
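// Two values may interfere unless one live range is strictly before the other.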
bool HloOrdering::MayInterfere(const HloValue& a, const HloValue& b,
const HloDataflowAnalysis& dataflow) const {
return !LiveRangeStrictlyBefore(a, b, dataflow) &&
!LiveRangeStrictlyBefore(b, a, dataflow);
}
PredecessorHloOrdering::PredecessorHloOrdering(const HloModule* module)
: HloOrdering(module) {}
bool PredecessorHloOrdering::ExecutesBeforeInSameComputation(
const HloInstruction* a, const HloInstruction* b) const {
CHECK_EQ(a->parent(), b->parent());
return a != b && predecessors_.at(a->parent())->IsReachable(a, b);
}
std::string PredecessorHloOrdering::ToStringHelper(
const std::string& name) const {
std::vector<std::string> pieces;
pieces.push_back(name);
for (auto* computation : module_->MakeNonfusionComputations()) {
pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
const auto all = computation->MakeInstructionPostOrder();
for (auto instruction : all) {
pieces.push_back(
absl::StrFormat(" %s predecessors:", instruction->name()));
for (auto predecessor : all) {
if (predecessors_.at(computation)
->IsReachable(predecessor, instruction)) {
pieces.push_back(absl::StrFormat(" %s", predecessor->name()));
}
}
}
}
return absl::StrJoin(pieces, "\n");
}
DependencyHloOrdering::DependencyHloOrdering(const HloModule* module)
: PredecessorHloOrdering(module) { | #include "xla/service/hlo_ordering.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloOrderingTest : public HloTestBase {};
TEST_F(HloOrderingTest, InstructionsInDifferentComputations) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto builder_c = HloComputation::Builder("C");
HloInstruction* c = builder_c.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
HloComputation* computation_c =
module->AddEmbeddedComputation(builder_c.Build());
auto builder_b = HloComputation::Builder("B");
builder_b.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* b = builder_b.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {}, computation_c));
HloComputation* computation_b =
module->AddEmbeddedComputation(builder_b.Build());
auto builder_a = HloComputation::Builder("A");
HloInstruction* a = builder_a.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {}, computation_c));
HloComputation* computation_a =
module->AddEmbeddedComputation(builder_a.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* x = builder.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {}, computation_a));
HloInstruction* y = builder.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {x}, computation_b));
module->AddEntryComputation(builder.Build());
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.ExecutesBefore(x, y));
EXPECT_FALSE(ordering.ExecutesBefore(y, x));
EXPECT_TRUE(ordering.ExecutesBefore(a, b));
EXPECT_FALSE(ordering.ExecutesBefore(b, a));
EXPECT_FALSE(ordering.ExecutesBefore(a, x));
EXPECT_TRUE(ordering.ExecutesBefore(a, y));
EXPECT_FALSE(ordering.ExecutesBefore(x, a));
EXPECT_FALSE(ordering.ExecutesBefore(y, a));
EXPECT_FALSE(ordering.ExecutesBefore(b, x));
EXPECT_FALSE(ordering.ExecutesBefore(b, y));
EXPECT_TRUE(ordering.ExecutesBefore(x, b));
EXPECT_FALSE(ordering.ExecutesBefore(y, b));
EXPECT_FALSE(ordering.ExecutesBefore(c, a));
EXPECT_FALSE(ordering.ExecutesBefore(c, b));
EXPECT_FALSE(ordering.ExecutesBefore(c, x));
EXPECT_FALSE(ordering.ExecutesBefore(c, y));
EXPECT_FALSE(ordering.ExecutesBefore(a, c));
EXPECT_FALSE(ordering.ExecutesBefore(b, c));
EXPECT_FALSE(ordering.ExecutesBefore(x, c));
EXPECT_FALSE(ordering.ExecutesBefore(y, c));
}
TEST_F(HloOrderingTest, InstructionsInWhileComputations) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "body_param"));
auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape, HloOpcode::kNegate, body_param));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "cond_param"));
auto convert = cond_builder.AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(xla::PRED, {}), cond_param));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape, condition, body, constant));
module->AddEntryComputation(builder.Build());
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.ExecutesBefore(constant, xla_while));
EXPECT_TRUE(ordering.ExecutesBefore(constant, cond_param));
EXPECT_TRUE(ordering.ExecutesBefore(constant, convert));
EXPECT_TRUE(ordering.ExecutesBefore(constant, body_param));
EXPECT_TRUE(ordering.ExecutesBefore(constant, negate));
EXPECT_FALSE(ordering.ExecutesBefore(xla_while, body_param));
EXPECT_FALSE(ordering.ExecutesBefore(xla_while, cond_param));
EXPECT_FALSE(ordering.ExecutesBefore(body_param, xla_while));
EXPECT_FALSE(ordering.ExecutesBefore(cond_param, xla_while));
EXPECT_TRUE(ordering.ExecutesBefore(cond_param, body_param));
EXPECT_TRUE(ordering.ExecutesBefore(convert, body_param));
EXPECT_TRUE(ordering.ExecutesBefore(cond_param, negate));
EXPECT_TRUE(ordering.ExecutesBefore(convert, negate));
EXPECT_FALSE(ordering.ExecutesBefore(body_param, cond_param));
}
TEST_F(HloOrderingTest, ParametersDefinedBeforeOthers) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(param),
dataflow->GetValueDefinedAt(constant)));
EXPECT_TRUE(!ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(param)));
}
TEST_F(HloOrderingTest, ValuesInWhileComputations) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "body_param"));
auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape, HloOpcode::kNegate, body_param));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "cond_param"));
auto convert = cond_builder.AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(xla::PRED, {}), cond_param));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape, condition, body, constant));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, constant, xla_while));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while)));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while), *dataflow));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(convert), *dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(negate),
dataflow->GetValueDefinedAt(xla_while)));
EXPECT_TRUE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(negate),
dataflow->GetValueDefinedAt(xla_while), *dataflow));
EXPECT_FALSE(ordering.MayInterfere(dataflow->GetValueDefinedAt(negate),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while)));
EXPECT_TRUE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(convert),
dataflow->GetValueDefinedAt(xla_while), *dataflow));
EXPECT_FALSE(ordering.MayInterfere(dataflow->GetValueDefinedAt(convert),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(xla_while),
dataflow->GetValueDefinedAt(add)));
ASSERT_EQ(dataflow->GetValueDefinedAt(xla_while).GetUses().size(), 1);
const HloUse* while_use =
dataflow->GetValueDefinedAt(xla_while).GetUses().data();
EXPECT_EQ(while_use->instruction, add);
EXPECT_TRUE(ordering.UsesBeforeValueDefinition(
{&while_use, 1}, dataflow->GetValueDefinedAt(add), *dataflow));
EXPECT_TRUE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(xla_while), dataflow->GetValueDefinedAt(add),
*dataflow));
}
TEST_F(HloOrderingTest, ToStringDoesNotCrash) {
const char* module_str = R"(
HloModule test_module
body.v8 {
prev.1 = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(prev.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.4, constant.1)
get-tuple-element.5 = f32[3]{0} get-tuple-element(prev.1), index=3
get-tuple-element.6 = f32[3]{0} get-tuple-element(prev.1), index=1
get-tuple-element.7 = f32[3]{0} get-tuple-element(prev.1), index=2
ROOT tuple = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) tuple(add, get-tuple-element.5, get-tuple-element.6, get-tuple-element.7)
}
condition.v4 {
constant.2 = s32[] constant(2)
prev.2 = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) parameter(0)
get-tuple-element.8 = s32[] get-tuple-element(prev.2), index=0
ROOT greater-than = pred[] compare(constant.2, get-tuple-element.8), direction=GT
}
fused_computation {
get-tuple-element.5.param_1 = f32[3]{0} parameter(1)
get-tuple-element.6.param_2 = f32[3]{0} parameter(2)
add.4 = f32[3]{0} add(get-tuple-element.5.param_1, get-tuple-element.6.param_2)
get-tuple-element.7.param_1.1 = f32[3]{0} parameter(0)
ROOT add.5 = f32[3]{0} add(add.4, get-tuple-element.7.param_1.1)
}
ENTRY while.v11 {
constant.5 = s32[] constant(0)
constant.6 = f32[3]{0} constant({1, 1, 1})
constant.7 = f32[3]{0} constant({2, 2, 2})
constant.8 = f32[3]{0} constant({3, 3, 3})
tuple.1 = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) tuple(constant.5, constant.6, constant.7, constant.8)
while = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) while(tuple.1), condition=condition.v4, body=body.v8
get-tuple-element.9 = f32[3]{0} get-tuple-element(while), index=3
get-tuple-element.10 = f32[3]{0} get-tuple-element(while), index=1
get-tuple-element.11 = f32[3]{0} get-tuple-element(while), index=2
ROOT fusion = f32[3]{0} fusion(get-tuple-element.9, get-tuple-element.10, get-tuple-element.11), kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DependencyHloOrdering ordering(module.get());
ordering.ToString();
}
TEST_F(HloOrderingTest, ConditionalInstructionOrdering) {
const char* module_str = R"(
HloModule test_conditional_module
true_branch {
param.1 = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(param.1), index=0
get-tuple-element.2 = s32[] get-tuple-element(param.1), index=1
add.1 = s32[] add(get-tuple-element.1, get-tuple-element.2)
ROOT tuple.1 = (s32[], s32[]) tuple(add.1, get-tuple-element.1)
}
false_branch {
param.2 = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(param.2), index=0
get-tuple-element.4 = s32[] get-tuple-element(param.2), index=1
add.2 = s32[] add(get-tuple-element.3, get-tuple-element.4)
ROOT tuple.2 = (s32[], s32[]) tuple(add.2, get-tuple-element.4)
}
ENTRY root {
param.3 = (pred[], (s32[], s32[])) parameter(0)
pred.1 = pred[] get-tuple-element(param.3), index=0
cond_arg.1 = (s32[], s32[]) get-tuple-element(param.3), index=1
conditional = (s32[], s32[]) conditional(pred.1, cond_arg.1, cond_arg.1), true_computation=true_branch, false_computation=false_branch
cond_res.1 = s32[] get-tuple-element(conditional), index=0
cond_res.2 = s32[] get-tuple-element(conditional), index=1
add.3 = s32[] add(cond_res.1, cond_res.2)
ROOT result = (s32[], s32[], s32[]) tuple(add.3, cond_res.1, cond_res.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
DependencyHloOrdering ordering(module.get());
HloInstruction* add_1 = FindInstruction(module.get(), "add.1");
HloInstruction* add_2 = FindInstruction(module.get(), "add.2");
HloInstruction* add_3 = FindInstruction(module.get(), "add.3");
HloInstruction* conditional = FindInstruction(module.get(), "conditional");
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_1),
dataflow->GetValueDefinedAt(add_2)));
EXPECT_TRUE(
ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_2),
dataflow->GetValueDefinedAt(conditional)));
EXPECT_TRUE(
ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_1),
dataflow->GetValueDefinedAt(conditional)));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_1),
dataflow->GetValueDefinedAt(add_3)));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_2),
dataflow->GetValueDefinedAt(add_3)));
}
TEST_F(HloOrderingTest,
ValuesLiveOutOfModuleInterfereWithInstructionsAfterRoot) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto builder = HloComputation::Builder(TestName());
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
HloInstruction* dead = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
HloComputation* entry =
module->AddEntryComputation(builder.Build(root));
HloSchedule schedule(module.get());
schedule.set_sequence(entry, {param, root, dead});
TF_ASSERT_OK(schedule.Verify());
SequentialHloOrdering ordering(schedule);
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
EXPECT_FALSE(ordering.ExecutesBefore(root, dead));
EXPECT_FALSE(ordering.ExecutesBefore(dead, root));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(root), dataflow->GetValueDefinedAt(dead),
*dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(root),
dataflow->GetValueDefinedAt(dead),
*dataflow));
}
TEST_F(HloOrderingTest,
ValuesLiveOutOfComputationInterfereWithInstructionsAfterRoot) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto subbuilder = HloComputation::Builder(TestName() + ".sub");
HloInstruction* param = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* root = subbuilder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
HloInstruction* dead = subbuilder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
HloComputation* subcomputation = module->AddEmbeddedComputation(
subbuilder.Build(root));
auto builder = HloComputation::Builder(TestName());
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {c}, subcomputation));
HloComputation* entry = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(subcomputation, {param, root, dead});
schedule.set_sequence(entry, {c, call});
TF_ASSERT_OK(schedule.Verify());
SequentialHloOrdering ordering(schedule);
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
EXPECT_FALSE(ordering.ExecutesBefore(root, dead));
EXPECT_FALSE(ordering.ExecutesBefore(dead, root));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(root), dataflow->GetValueDefinedAt(dead),
*dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(root),
dataflow->GetValueDefinedAt(dead),
*dataflow));
}
TEST_F(HloOrderingTest, InterferenceWithOuterRoot) {
absl::string_view hlo_string = R"(
HloModule InterferenceWithOuterRoot, is_scheduled=true
Embedded (embedded_param: f32[4096,4096]) -> f32[4096,4096] {
embedded_param = f32[4096,4096]{1,0} parameter(0)
multiply = f32[4096,4096]{1,0} multiply(embedded_param, embedded_param)
ROOT log = f32[4096,4096]{1,0} log(multiply)
}
ENTRY InterferenceWithOuterRoot {
param = f32[4096,4096]{1,0} parameter(0)
ROOT add = f32[4096,4096]{1,0} add(param, param)
call = f32[4096,4096]{1,0} call(param), to_apply=Embedded
}
)";
HloModuleConfig hlo_config;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, hlo_config));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
DependencyHloOrdering ordering(module.get());
auto multiply = FindInstruction(module.get(), "multiply");
auto add = FindInstruction(module.get(), "add");
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(multiply),
dataflow->GetValueDefinedAt(add),
*dataflow));
}
TEST_F(HloOrderingTest, RootNotLastInstruction) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
body2 {
p_body2 = (f32[2]{0}) parameter(0)
p_body2.1 = f32[2]{0} get-tuple-element(p_body2), index=0
add.3 = f32[2]{0} add(p_body2.1, p_body2.1)
ROOT root2 = (f32[2]{0}) tuple(add.3)
}
condition2 {
p_cond2 = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
body {
p_body = (f32[2]{0}) parameter(0)
p_body.1 = f32[2]{0} get-tuple-element(p_body), index=0
ROOT root = (f32[2]{0}) tuple(p_body.1)
copy = f32[2]{0} copy(p_body.1)
tuple = (f32[2]{0}) tuple(copy)
while.1 = (f32[2]{0}) while(tuple), condition=condition2, body=body2
}
condition {
p_cond = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const0 = f32[2]{0} constant({1, 2})
while_init = (f32[2]{0}) tuple(const0)
ROOT while.0 = (f32[2]{0}) while(while_init), condition=condition, body=body
}
)";
HloModuleConfig hlo_config;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, hlo_config));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
SequentialHloOrdering ordering(module->schedule());
auto root = FindInstruction(module.get(), "root");
auto p_body_2 = FindInstruction(module.get(), "p_body2");
auto tuple_use = HloUse{root, 0};
const HloValue& value = dataflow->GetUniqueValueAt(p_body_2, {0});
EXPECT_FALSE(
ordering.UsesBeforeValueDefinition({&tuple_use}, value, *dataflow));
}
TEST_F(HloOrderingTest, AsyncCallUses) {
absl::string_view hlo_string = R"(
HloModule single_sc_async_call
%called_computation {
%out_param = s32[1024]{0} parameter(1)
%input = s32[1024]{0} parameter(0)
%size = s32[] constant(256)
%index = s32[] custom-call(), custom_call_target="Baz"
%start = s32[] multiply(s32[] %size, s32[] %index)
%input2 = s32[256]{0} dynamic-slice(s32[1024]{0} %input, s32[] %start), dynamic_slice_sizes={256}
%output = s32[256]{0} add(s32[256]{0} %input2, s32[256]{0} %input2)
ROOT %output2 = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %out_param, s32[256]{0} %output, s32[] %start)
}, execution_thread="foobar"
%async_wrapped {
%async_param = s32[1024]{0} parameter(0)
%async_param.1 = s32[1024]{0} parameter(1)
ROOT %call = s32[1024]{0} call(s32[1024]{0} %async_param, s32[1024]{0} %async_param.1), to_apply=%called_computation
}, execution_thread="foobar"
ENTRY %main {
%input.1 = s32[1024]{0} parameter(0)
%buf = s32[1024]{0} custom-call(), custom_call_target="AllocateBuffer"
%async-start = ((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) async-start(s32[1024]{0} %input.1, s32[1024]{0} %buf), async_execution_thread="foobar", calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) %async-start), async_execution_thread="foobar", calls=%async_wrapped
}
)";
HloModuleConfig hlo_config;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, hlo_config));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
DependencyHloOrdering ordering(module.get());
auto async_start = FindInstruction(module.get(), "async-start");
auto async_done = FindInstruction(module.get(), "async-done");
auto call = FindInstruction(module.get(), "call");
auto output2 = FindInstruction(module.get(), "output2");
auto async_start_use = HloUse{async_start, 1};
auto async_done_use = HloUse{async_done, 0, {0, 1}};
auto call_use = HloUse{call, 1};
const HloValue& value = dataflow->GetUniqueValueAt(output2, {});
EXPECT_TRUE(ordering.UsesBeforeValueDefinition(
{&async_start_use, &call_use, &async_done_use}, value, *dataflow));
}
TEST_F(HloOrderingTest,
UsesBeforeValueDefinitionValueIsAsyncWrappedCallInstruction) {
constexpr absl::string_view hlo_string = R"(
HloModule UsesBeforeValueDefinitionValueIsAsyncWrappedCallInstruction, input_output_alias={ {}: (0, {}, must-alias) }, entry_computation_layout={(f32[2,2])->f32[2,2]}
%host_computation {
%arg_0.2 = f32[2,2] parameter(0)
%constant.1 = f32[] constant(2)
%broadcast.1 = f32[2,2] broadcast(f32[] %constant.1), dimensions={}
ROOT %multiply.1 = f32[2,2] multiply(f32[2,2] %arg_0.2, f32[2,2] %broadcast.1)
}, execution_thread="host"
%async_wrapped_comp {
%param_0 = f32[2,2] parameter(0)
ROOT %async_wrapped_call = f32[2,2] custom-call(f32[2,2] %param_0), custom_call_target="HostExecute", called_computations={%host_computation}
}, execution_thread="host"
ENTRY %main {
%p0 = f32[2,2] parameter(0)
%host-async-start = ((f32[2,2]), f32[2,2], u32[]) async-start(f32[2,2] %p0), async_execution_thread="host", calls=%async_wrapped_comp
%host-async-done = f32[2,2] async-done(((f32[2,2]), f32[2,2], u32[]) %host-async-start)
ROOT %copy.1 = f32[2,2] copy(f32[2,2] %host-async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> dataflow,
HloDataflowAnalysis::Run(*module, true));
HloInstruction* async_start =
FindInstruction(module.get(), "host-async-start");
HloInstruction* async_done = FindInstruction(module.get(), "host-async-done");
HloInstruction* async_wrapped_call =
FindInstruction(module.get(), "async_wrapped_call");
HloInstruction* p0 = FindInstruction(module.get(), "p0");
ASSERT_NE(async_start, nullptr);
ASSERT_NE(async_done, nullptr);
ASSERT_NE(async_wrapped_call, nullptr);
ASSERT_NE(p0, nullptr);
HloUse async_start_use = HloUse{async_start, 0};
HloUse async_done_use = HloUse{async_done, 0, {0, 0}};
HloUse call_use = HloUse{async_wrapped_call, 0};
const HloValue& value = dataflow->GetUniqueValueAt(async_wrapped_call, {});
DependencyHloOrdering ordering(module.get());
EXPECT_FALSE(
ordering.UsesBeforeValueDefinition({&async_start_use}, value, *dataflow));
EXPECT_FALSE(
ordering.UsesBeforeValueDefinition({&call_use}, value, *dataflow));
EXPECT_FALSE(
ordering.UsesBeforeValueDefinition({&async_done_use}, value, *dataflow));
}
TEST_F(HloOrderingTest,
UsesBeforeValueDefinitionValueIsAnAliasedAsyncWrappedCallInstruction) {
constexpr absl::string_view hlo_string = R"(
HloModule UsesBeforeValueDefinitionValueIsAnAliasedAsyncWrappedCallInstruction, input_output_alias={ {}: (0, {}, must-alias) }, entry_computation_layout={(f32[2,2])->f32[2,2]}
%host_computation {
%arg_0.2 = f32[2,2] parameter(0)
%constant.1 = f32[] constant(2)
%broadcast.1 = f32[2,2] broadcast(f32[] %constant.1), dimensions={}
ROOT %multiply.1 = f32[2,2] multiply(f32[2,2] %arg_0.2, f32[2,2] %broadcast.1)
}, execution_thread="host"
%async_wrapped_comp {
%param_0 = f32[2,2] parameter(0)
ROOT %async_wrapped_call = f32[2,2] custom-call(f32[2,2] %param_0), custom_call_target="HostExecute", called_computations={%host_computation}, output_to_operand_aliasing={{}: (0, {})}
}, execution_thread="host"
ENTRY %main {
%p0 = f32[2,2] parameter(0)
%host-async-start = ((f32[2,2]), f32[2,2], u32[]) async-start(f32[2,2] %p0), async_execution_thread="host", calls=%async_wrapped_comp
ROOT %host-async-done = f32[2,2] async-done(((f32[2,2]), f32[2,2], u32[]) %host-async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> dataflow,
HloDataflowAnalysis::Run(*module, true));
HloInstruction* async_start =
FindInstruction(module.get(), "host-async-start");
HloInstruction* async_done = FindInstruction(module.get(), "host-async-done");
HloInstruction* async_wrapped_call =
FindInstruction(module.get(), "async_wrapped_call");
HloInstruction* p0 = FindInstruction(module.get(), "p0");
ASSERT_NE(async_start, nullptr);
ASSERT_NE(async_done, nullptr);
ASSERT_NE(async_wrapped_call, nullptr);
ASSERT_NE(p0, nullptr);
HloUse async_start_use = HloUse{async_start, 0};
HloUse async_done_use = HloUse{async_done, 0, {0, 0}};
HloUse call_use = HloUse{async_wrapped_call, 0};
const HloValue& value = dataflow->GetUniqueValueAt(async_wrapped_call, {});
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_start_use}, value, *dataflow));
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&call_use}, value, *dataflow));
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_done_use |
1,879 | cpp | tensorflow/tensorflow | float_support | third_party/xla/xla/service/float_support.cc | third_party/xla/xla/service/gpu/float_support_test.cc | #ifndef XLA_SERVICE_FLOAT_SUPPORT_H_
#define XLA_SERVICE_FLOAT_SUPPORT_H_
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/xla_data.pb.h"
namespace xla {
// Describes which HLO instructions a backend can execute directly on a given
// low-precision floating-point type (e.g. BF16), so normalization passes know
// where converts to/from the high-precision type must be inserted.
class FloatSupport {
public:
explicit FloatSupport(PrimitiveType low_precision_type,
PrimitiveType high_precision_type = F32)
: low_precision_type_(low_precision_type),
high_precision_type_(high_precision_type) {}
virtual ~FloatSupport() = default;
PrimitiveType LowPrecisionType() const { return low_precision_type_; }
PrimitiveType HighPrecisionType() const { return high_precision_type_; }
virtual bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const;
virtual bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const;
virtual bool SupportsMixedPrecisions(const HloInstruction& hlo) const;
static bool EffectiveOperandPrecisionIsOutputPrecision(
const HloInstruction& hlo, int64_t operand_index);
virtual bool EffectiveOperandPrecisionIsLowPrecision(
const HloInstruction& hlo, int64_t operand_index) const;
private:
PrimitiveType low_precision_type_;
PrimitiveType high_precision_type_;
};
}
#endif  // XLA_SERVICE_FLOAT_SUPPORT_H_
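// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: a backend would
// typically subclass FloatSupport to declare which ops it can run natively in
// the low-precision type. The class below is hypothetical (it assumes BF16 and
// a backend with native BF16 adds) and only demonstrates the extension point.
// ---------------------------------------------------------------------------
namespace xla {
class ExampleBf16Support : public FloatSupport {
 public:
  ExampleBf16Support() : FloatSupport(BF16) {}
  // Accept BF16 operands for adds in addition to the pass-through ops the
  // base class already accepts.
  bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
                                   int64_t operand_index) const override {
    return hlo.opcode() == HloOpcode::kAdd ||
           FloatSupport::SupportsLowPrecisionOperand(hlo, operand_index);
  }
  bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
    return hlo.opcode() == HloOpcode::kAdd ||
           FloatSupport::SupportsLowPrecisionOutput(hlo);
  }
};
}  // namespace xla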
#include "xla/service/float_support.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
bool FloatSupport::SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const {
switch (hlo.opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kDomain:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kOptimizationBarrier:
return true;
case HloOpcode::kConvert:
CHECK_EQ(operand_index, 0);
return hlo.operand(0)->shape().element_type() == low_precision_type_;
default:
break;
}
return false;
}
bool FloatSupport::SupportsLowPrecisionOutput(const HloInstruction& hlo) const {
switch (hlo.opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kDomain:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kOptimizationBarrier:
return true;
case HloOpcode::kConvert:
return hlo.shape().element_type() == low_precision_type_;
default:
break;
}
return false;
}
bool FloatSupport::SupportsMixedPrecisions(const HloInstruction& hlo) const {
switch (hlo.opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kConvert:
case HloOpcode::kCustomCall:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kOptimizationBarrier:
return true;
default:
break;
}
return false;
}
// Returns true for ops that merely move, select, or rearrange data (copies,
// reshapes, transposes, slices, max/min, selects, and similar), where the
// operand's effective precision carries through to the output unchanged.
bool FloatSupport::EffectiveOperandPrecisionIsOutputPrecision(
const HloInstruction& hlo, int64_t operand_index) {
switch (hlo.opcode()) {
case HloOpcode::kAbs:
case HloOpcode::kAllGather:
case HloOpcode::kAllToAll:
case HloOpcode::kBroadcast:
case HloOpcode::kClamp:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kConcatenate:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kDomain:
case HloOpcode::kGetTupleElement:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kPad:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kSort:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
case HloOpcode::kOptimizationBarrier:
return true;
case HloOpcode::kBitcast:
return hlo.shape().element_type() ==
hlo.operand(0)->shape().element_type();
case HloOpcode::kDynamicSlice:
return operand_index == 0;
case HloOpcode::kDynamicUpdateSlice:
return operand_index == 0 || operand_index == 1;
case HloOpcode::kGather:
return operand_index == 0;
case HloOpcode::kSelect:
return operand_index == 1 || operand_index == 2;
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow: {
HloComputation* reduce_comp = hlo.called_computations()[0];
for (HloInstruction* inst : reduce_comp->instructions()) {
if (inst->opcode() == HloOpcode::kParameter) {
continue;
}
for (int64_t i = 0; i < inst->operand_count(); ++i) {
if (!EffectiveOperandPrecisionIsOutputPrecision(*inst, i)) {
return false;
}
}
}
return true;
}
default:
break;
}
return false;
}
bool FloatSupport::EffectiveOperandPrecisionIsLowPrecision(
const HloInstruction& hlo, int64_t operand_index) const {
return false;
}
} | #include <variant>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
namespace {
class FloatSupportTest : public HloTestBase {
public:
const se::GpuComputeCapability& GetGpuComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
};
class FloatSupportTestWithCublas : public FloatSupportTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = FloatSupportTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
};
class FloatSupportTestWithTriton : public FloatSupportTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = FloatSupportTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(true);
debug_options.set_xla_gpu_triton_gemm_any(true);
debug_options.set_xla_gpu_cublas_fallback(false);
return debug_options;
}
};
TEST_F(FloatSupportTestWithCublas, MixedTypeDotIsNotUpcasted) {
constexpr absl::string_view kHloText = R"(
ENTRY e {
p0 = bf16[32,32] parameter(0)
p1 = bf16[32,32] parameter(1)
ROOT d = f32[32,32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK-NOT: convert
; CHECK: __cublas
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-6, 1e-6}));
}
TEST_F(FloatSupportTestWithTriton, MixedTypeDotWithBF16IsNotUpcasted) {
bool skip_test = std::visit(
VariantVisitor{[](const se::CudaComputeCapability& cc) {
return !cc.IsAtLeast(se::CudaComputeCapability::AMPERE);
},
[](const se::RocmComputeCapability&) { return true; }},
GetGpuComputeCapability());
if (skip_test) {
GTEST_SKIP() << "Not supported on this GPU architecture";
}
constexpr absl::string_view kHloText = R"(
ENTRY e {
p0 = bf16[32,32] parameter(0)
p1 = bf16[32,32] parameter(1)
ROOT d = f32[32,32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK-NOT: convert
; CHECK: __triton
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-6, 1e-6}));
}
}
}
} |
1,880 | cpp | tensorflow/tensorflow | scatter_simplifier | third_party/xla/xla/service/scatter_simplifier.cc | third_party/xla/xla/service/scatter_simplifier_test.cc | #ifndef XLA_SERVICE_SCATTER_SIMPLIFIER_H_
#define XLA_SERVICE_SCATTER_SIMPLIFIER_H_
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
// Rewrites scatter ops into a canonical form: two-dimensional scatter indices
// with index_vector_dim=1, no inserted_window_dims, update_window_dims that
// are exactly [1..operand_rank], and identity scatter_dims_to_operand_dims,
// wrapping the new scatter in the transposes/reshapes needed to preserve the
// original semantics.
class ScatterSimplifier : public OpExpanderPass {
public:
absl::string_view name() const override { return "scatter_simplifier"; }
static bool IsSimplifiedScatter(const HloScatterInstruction* scatter);
protected:
bool InstructionMatchesPattern(HloInstruction* inst) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* inst) override;
};
}
#endif  // XLA_SERVICE_SCATTER_SIMPLIFIER_H_
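// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: ScatterSimplifier is an
// ordinary HLO module pass, so it can be run directly or added to a pipeline.
// The helper name below is hypothetical; `module` is assumed to be a valid,
// already-verified HloModule.
// ---------------------------------------------------------------------------
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
namespace xla {
inline absl::StatusOr<bool> RunScatterSimplifierForIllustration(
    HloModule* module) {
  ScatterSimplifier simplifier;
  // Returns true if at least one scatter was rewritten into the canonical
  // form accepted by ScatterSimplifier::IsSimplifiedScatter.
  return simplifier.Run(module, /*execution_threads=*/{});
}
}  // namespace xla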
#include "xla/service/scatter_simplifier.h"
#include <algorithm>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/permutation_util.h"
#include "xla/service/gather_scatter_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Moves the scatter (non-window) dimensions of `updates` to the front,
// collapses them into a single leading dimension, and re-inserts degenerate
// dimensions for any inserted_window_dims.
absl::StatusOr<HloInstruction*> FlattenAndTransposeUpdates(
HloInstruction* updates, absl::Span<const int64_t> update_window_dims,
absl::Span<const int64_t> inserted_window_dims,
int64_t scatter_indices_size) {
int64_t updates_rank = updates->shape().rank();
std::vector<int64_t> permutation;
const int64_t num_scatter_dims = updates_rank - update_window_dims.size();
permutation.reserve(updates_rank);
for (int i = 0; i < updates_rank; ++i) {
if (!absl::c_linear_search(update_window_dims, i)) {
permutation.push_back(i);
}
}
absl::c_copy(update_window_dims, std::back_inserter(permutation));
TF_ASSIGN_OR_RETURN(updates, MaybeTranspose(updates, permutation));
if (num_scatter_dims > 1) {
TF_ASSIGN_OR_RETURN(updates, CollapseFirstNDims(updates, num_scatter_dims));
} else if (num_scatter_dims == 0) {
TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, {0}));
}
if (!inserted_window_dims.empty()) {
std::vector<int64_t> new_dims;
new_dims.reserve(inserted_window_dims.size());
for (int64_t i : inserted_window_dims) {
new_dims.push_back(i + 1);
}
TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, new_dims));
}
return updates;
}
// The transformed updates carry an extra leading (flattened scatter)
// dimension, so the update permutation keeps dimension 0 in place and shifts
// the operand permutation by one.
std::vector<int64_t> MakeUpdatePermutation(
const std::vector<int64_t>& operand_permutation) {
std::vector<int64_t> update_permutation;
update_permutation.reserve(operand_permutation.size() + 1);
update_permutation.push_back(0);
for (auto& dim : operand_permutation) {
update_permutation.push_back(dim + 1);
}
return update_permutation;
}
absl::StatusOr<std::vector<HloInstruction*>> TransformScatterUpdates(
HloScatterInstruction* scatter,
const std::vector<int64_t>& update_permutation,
int64_t scatter_indices_size) {
std::vector<HloInstruction*> scatter_updates;
const auto& attrs = scatter->scatter_dimension_numbers();
scatter_updates.reserve(scatter->scatter_updates().size());
for (auto* update : scatter->scatter_updates()) {
TF_ASSIGN_OR_RETURN(
scatter_updates.emplace_back(),
FlattenAndTransposeUpdates(update, attrs.update_window_dims(),
attrs.inserted_window_dims(),
scatter_indices_size));
}
return MaybeTranspose(scatter_updates, update_permutation);
}
// Builds the dimension numbers of the canonical scatter: update_window_dims =
// [1..operand_rank], scatter_dims_to_operand_dims = [0..N), and
// index_vector_dim = 1.
ScatterDimensionNumbers MakeScatterDimensionNumbers(
int64_t operand_rank, int64_t scatter_indices_vector_size) {
ScatterDimensionNumbers dim_numbers;
dim_numbers.mutable_update_window_dims()->Reserve(
static_cast<int>(operand_rank));
for (int i = 0; i < operand_rank; ++i) {
dim_numbers.add_update_window_dims(1 + i);
}
dim_numbers.mutable_scatter_dims_to_operand_dims()->Reserve(
static_cast<int>(scatter_indices_vector_size));
for (int i = 0; i < scatter_indices_vector_size; ++i) {
dim_numbers.add_scatter_dims_to_operand_dims(i);
}
dim_numbers.set_index_vector_dim(1);
return dim_numbers;
}
}
// Canonicalizes one scatter: reshapes the indices to 2D, permutes and flattens
// the updates, transposes the operands, emits the simplified scatter, and
// transposes the result back when the operand permutation is not the identity.
absl::StatusOr<HloInstruction*> ScatterSimplifier::ExpandInstruction(
HloInstruction* inst) {
auto* scatter = Cast<HloScatterInstruction>(inst);
if (scatter->called_computations().size() != 1) {
return InvalidArgumentStrCat(
"Expected scatter->called_computations() to have exactly one element, "
"got ",
scatter->called_computations().size());
}
const auto& attrs = scatter->scatter_dimension_numbers();
const int operand_rank =
attrs.update_window_dims().size() + attrs.inserted_window_dims().size();
auto [operand_permutation, operand_permutation_inverse] =
MakeOperandStartIndexPermutations(attrs.scatter_dims_to_operand_dims(),
operand_rank);
auto update_permutation = MakeUpdatePermutation(operand_permutation);
TF_ASSIGN_OR_RETURN(auto* scatter_indices,
TransformStartIndices(scatter->scatter_indices(),
attrs.index_vector_dim()));
TF_ASSIGN_OR_RETURN(
auto scatter_updates,
TransformScatterUpdates(scatter, update_permutation,
scatter_indices->shape().dimensions(0)));
TF_ASSIGN_OR_RETURN(
auto scatter_operands,
MaybeTranspose(scatter->scatter_operands(), operand_permutation));
auto dim_numbers = MakeScatterDimensionNumbers(
operand_rank, attrs.scatter_dims_to_operand_dims().size());
Shape output_shape;
if (scatter_operands.size() == 1) {
output_shape = scatter_operands.front()->shape();
} else {
std::vector<Shape> shapes;
shapes.reserve(scatter_operands.size());
for (auto* operand : scatter_operands) {
shapes.push_back(operand->shape());
}
output_shape = ShapeUtil::MakeTupleShape(shapes);
}
auto* result = scatter->AddInstruction(HloInstruction::CreateScatter(
output_shape, scatter_operands, scatter_indices, scatter_updates,
scatter->called_computations().front(), dim_numbers,
scatter->indices_are_sorted(), scatter->unique_indices()));
if (IsIdentityPermutation(operand_permutation)) {
return result;
}
if (scatter->scatter_operands().size() == 1) {
return MaybeTranspose(result, operand_permutation_inverse);
}
std::vector<HloInstruction*> result_items;
result_items.reserve(scatter->scatter_operands().size());
for (int i = 0; i < scatter->scatter_operands().size(); ++i) {
TF_ASSIGN_OR_RETURN(result_items.emplace_back(),
MakeGetTupleElementHlo(result, i));
TF_ASSIGN_OR_RETURN(
result_items.back(),
MaybeTranspose(result_items.back(), operand_permutation_inverse));
}
return MaybeMakeTuple(result_items);
}
// A scatter is already simplified if its index vector is the trailing
// dimension of the indices, the updates have at most one scatter dimension
// and it is dimension 0, scatter_dims_to_operand_dims is the identity,
// update_window_dims are sorted, and there are no inserted_window_dims.
bool ScatterSimplifier::IsSimplifiedScatter(
const HloScatterInstruction* scatter) {
const auto& dims = scatter->scatter_dimension_numbers();
bool nonstandard_index_vector_dim =
dims.index_vector_dim() != scatter->scatter_indices()->shape().rank() - 1;
int64_t num_scatter_dims =
scatter->scatter_updates().front()->shape().rank() -
dims.update_window_dims().size();
bool scatter_indices_reordered =
!IsIdentityPermutation(dims.scatter_dims_to_operand_dims());
bool scatter_dim_not_first =
absl::c_linear_search(dims.update_window_dims(), 0);
bool update_window_dims_sorted = absl::c_is_sorted(dims.update_window_dims());
return !(nonstandard_index_vector_dim || num_scatter_dims > 1 ||
scatter_indices_reordered || scatter_dim_not_first ||
!update_window_dims_sorted || !dims.inserted_window_dims().empty());
}
bool ScatterSimplifier::InstructionMatchesPattern(HloInstruction* inst) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return scatter && !IsSimplifiedScatter(scatter);
}
} | #include "xla/service/scatter_simplifier.h"
#include <optional>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ScatterSimplifierTest : public HloTestBase {};
TEST_F(ScatterSimplifierTest, InsertsIndexVectorAndWindowDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
p2 = f32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = f32[3,3] parameter(0)
operand1 = f32[3,3] parameter(1)
indices = s32[2] parameter(2)
update0 = f32[2,3] parameter(3)
update1 = f32[2,3] parameter(4)
ROOT scatter = (f32[3,3], f32[3,3]) scatter(operand0, operand1, indices,
update0, update1),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[SCATTER_DIMS_WITH_VECTOR:.*]] = s32[2,1]{1,0} reshape(%indices)
CHECK: %[[RESHAPED_UPDATES0:.*]] = f32[2,1,3]{2,1,0} reshape(%update0)
CHECK: %[[RESHAPED_UPDATES1:.*]] = f32[2,1,3]{2,1,0} reshape(%update1)
CHECK: ROOT %scatter = (f32[3,3]{1,0}, f32[3,3]{1,0}) scatter(
CHECK-SAME: %operand0, %operand1, %[[SCATTER_DIMS_WITH_VECTOR]],
CHECK-SAME: %[[RESHAPED_UPDATES0]], %[[RESHAPED_UPDATES1]]),
CHECK-SAME: update_window_dims={1,2},
CHECK-SAME: inserted_window_dims={},
CHECK-SAME: scatter_dims_to_operand_dims={0},
CHECK-SAME: index_vector_dim=1,
CHECK-SAME: to_apply=%scatter_computation
)");
}
TEST_F(ScatterSimplifierTest, CollapsesScatterDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,1,2] parameter(1)
update = f32[2,1,1,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={2, 3},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=2
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[RESHAPED_INDICES:.*]] = s32[2,2]{1,0} reshape(%indices)
CHECK: %[[RESHAPED_UPDATES:.*]] = f32[2,1,3]{2,1,0} reshape(%update)
CHECK: scatter(
CHECK-SAME: %[[RESHAPED_INDICES]]
CHECK-SAME: %[[RESHAPED_UPDATES]]
)");
}
TEST_F(ScatterSimplifierTest, NoOpForSimpleScatter) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
update = f32[2,1,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), std::nullopt);
}
TEST_F(ScatterSimplifierTest, MovesIndexVectorDim) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,1] parameter(1)
update = f32[1,3,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1, 2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[TRANSPOSED_INDICES:.*]] = s32[1,2]{1,0}
CHECK-SAME: transpose(%indices), dimensions={1,0}
CHECK: scatter(%operand, %[[TRANSPOSED_INDICES]], %update),
CHECK-SAME: index_vector_dim=1
)");
}
TEST_F(ScatterSimplifierTest, TransformsUpdatesAndOperandUsingScatterDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,4,5] parameter(0)
indices = s32[2,2] parameter(1)
update = f32[2,1,1,3] parameter(2)
ROOT scatter = f32[3,4,5] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1, 2, 3},
inserted_window_dims={},
scatter_dims_to_operand_dims={2,0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[T_OPERAND:.*]] = f32[5,3,4]{2,1,0} transpose(%operand),
CHECK-SAME: dimensions={2,0,1}
CHECK: %[[T_UPDATES:.*]] = f32[2,3,1,1]{3,2,1,0} transpose(%update),
CHECK-SAME: dimensions={0,3,1,2}
CHECK: %[[SCATTER:.*]] = {{.*}} scatter(
CHECK-SAME: %[[T_OPERAND]], %indices, %[[T_UPDATES]])
CHECK-SAME: scatter_dims_to_operand_dims={0,1},
CHECK: ROOT %{{.*}} = f32[3,4,5]
CHECK-SAME: transpose(%[[SCATTER]]), dimensions={1,2,0}
)");
}
TEST_F(ScatterSimplifierTest, MakesScatterDimensionsLeadingInUpdates) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3] parameter(0)
indices = s32[1,1] parameter(1)
update = f32[2,1] parameter(2)
ROOT scatter = f32[3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={0},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[TRANSPOSED_UPDATES:.*]] = f32[1,2]{1,0}
CHECK-SAME: transpose(%update), dimensions={1,0}
CHECK: scatter(
CHECK-SAME: %[[TRANSPOSED_UPDATES]]
CHECK-SAME: update_window_dims={1},
)");
}
TEST_F(ScatterSimplifierTest, ZeroDimScatterIndices) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[4,4] parameter(0)
indices = s32[2] parameter(1)
update = f32[3,3] parameter(2)
ROOT scatter = f32[4,4]{1,0} scatter(operand, indices, update),
update_window_dims={0,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
to_apply=scatter_computation
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: scatter(
)");
}
TEST_F(ScatterSimplifierTest,
IsSimplifiedScatterReturnsFalseForUnsortedWindowDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,2] parameter(0)
indices = s32[1,1] parameter(1)
update = f32[1,2,2] parameter(2)
ROOT scatter = f32[3,2] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={2,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
auto module = ParseAndReturnUnverifiedModule(kModuleStr).value();
auto scatter = module->entry_computation()->root_instruction();
EXPECT_FALSE(ScatterSimplifier::IsSimplifiedScatter(
Cast<HloScatterInstruction>(scatter)));
}
}
} |
1,881 | cpp | tensorflow/tensorflow | while_loop_all_reduce_code_motion | third_party/xla/xla/service/while_loop_all_reduce_code_motion.cc | third_party/xla/xla/service/while_loop_all_reduce_code_motion_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_ALL_REDUCE_CODE_MOTION_H_
#define XLA_SERVICE_WHILE_LOOP_ALL_REDUCE_CODE_MOTION_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Hoists all-reduce (and optionally reduce-scatter) ops whose results are only
// accumulated into a loop-carried buffer out of while-loop bodies: the loop
// accumulates the unreduced values and a single collective is applied to the
// accumulator after the loop, reducing the number of collectives issued.
class WhileLoopAllReduceCodeMotion : public HloModulePass {
public:
explicit WhileLoopAllReduceCodeMotion(bool enable_reduce_scatter = false)
: enable_reduce_scatter_(enable_reduce_scatter) {}
~WhileLoopAllReduceCodeMotion() override = default;
absl::string_view name() const override {
return "while-loop-all-reduce-code-motion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const bool enable_reduce_scatter_;
};
}
#endif  // XLA_SERVICE_WHILE_LOOP_ALL_REDUCE_CODE_MOTION_H_
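// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: typical invocation of
// the pass, mirroring how the unit tests below drive it. The helper name is
// hypothetical; `module` is assumed to be a valid HloModule.
// ---------------------------------------------------------------------------
namespace xla {
inline absl::StatusOr<bool> RunAllReduceCodeMotionForIllustration(
    HloModule* module, bool also_sink_reduce_scatter) {
  WhileLoopAllReduceCodeMotion pass(
      /*enable_reduce_scatter=*/also_sink_reduce_scatter);
  // Returns true when at least one accumulating collective was hoisted out of
  // a while-loop body.
  return pass.Run(module);
}
}  // namespace xla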
#include "xla/service/while_loop_all_reduce_code_motion.h"
#include <memory>
#include <optional>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// One `add` that accumulates a collective's result into a loop-carried buffer,
// plus the buffer's tuple index and any dynamic-slice/dynamic-update-slice
// pair used to address it.
struct AccumulationContext {
HloInstruction* accumulation_instruction;
HloInstruction* accumulation_buffer;
int64_t param_tuple_index;
std::optional<HloInstruction*> dynamic_slice;
std::optional<HloInstruction*> dynamic_update_slice;
};
// Result of IsAllReduceMovable: whether hoisting is legal and, if so, the
// accumulations that have to be rewritten.
struct MovableAllReduceContext {
bool is_movable;
std::vector<AccumulationContext> accumulation_contexts;
};
// Returns true if `hlo` is a scalar zero constant or a broadcast of one.
bool IsZero(const HloInstruction* hlo) {
if (hlo->IsConstant() && hlo->shape().rank() == 0 &&
hlo->literal().IsZero({})) {
return true;
}
if (hlo->opcode() == HloOpcode::kBroadcast) {
return IsZero(hlo->operand(0));
}
return false;
}
// Returns true if `instruction` (at shape index `index`) holds the same value
// on every participant of each collective group, consulting the replication
// analysis that matches the collective's group mode.
bool IsValueReplicatedWithinEachAllReduceGroup(
const HloInstruction& instruction, const ShapeIndex& index,
CollectiveOpGroupMode all_reduce_group_mode,
absl::Span<const ReplicaGroup> replica_groups, int num_replicas,
int num_partitions,
const std::unique_ptr<HloReplicationAnalysis>&
cross_replica_replication_analysis,
const std::unique_ptr<HloReplicationAnalysis>&
cross_partition_replication_analysis) {
VLOG(5) << "IsValueReplicatedWithinEachAllReduceGroup,"
<< " all_reduce_group_mode: "
<< CollectiveOpGroupModeToString(all_reduce_group_mode);
switch (all_reduce_group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
return cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups);
}
case CollectiveOpGroupMode::kCrossPartition: {
return cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups);
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
return (cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups)) &&
(cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis
->HloInstructionIsReplicatedAt(&instruction, index));
}
case CollectiveOpGroupMode::kFlattenedID: {
if (num_replicas == 1) {
return cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis
->HloInstructionIsReplicatedAt(&instruction, index,
replica_groups);
}
if (num_partitions == 1) {
return cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups);
}
return (cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index)) &&
(cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis
->HloInstructionIsReplicatedAt(&instruction, index));
}
}
}
// If `instruction` is a broadcast of a scalar, returns that scalar; otherwise
// returns nullptr.
HloInstruction* GetEffectiveScalar(HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kBroadcast) {
return nullptr;
}
HloInstruction* operand = instruction->mutable_operand(0);
if (!ShapeUtil::IsScalar(operand->shape())) {
return nullptr;
}
return operand;
}
// Decides whether `all_reduce` can be hoisted out of `while_body`: the
// reduction must be a summation over a supported element type, and every use
// of the result must (possibly through converts/reshapes/selects-against-zero)
// feed an add that accumulates into a loop-carried buffer which is written
// back to the same tuple index and is not otherwise read inside the loop.
MovableAllReduceContext IsAllReduceMovable(
HloAllReduceInstructionBase* all_reduce, HloComputation* while_body,
const std::unique_ptr<HloReplicationAnalysis>&
cross_replica_replication_analysis,
const std::unique_ptr<HloReplicationAnalysis>&
cross_partition_replication_analysis) {
VLOG(4) << "IsAllReduceMovable: " << all_reduce->ToString();
std::optional<ReductionKind> reduction_type =
MatchReductionComputation(all_reduce->to_apply());
const bool all_reduce_is_summation =
reduction_type.has_value() && *reduction_type == ReductionKind::SUM;
const absl::InlinedVector<PrimitiveType, 12> kSupportedTypes{
BF16, F16, F32, F64, S8, S16, S32, S64, U8, U16, U32, U64};
if (!absl::c_linear_search(kSupportedTypes,
all_reduce->shape().element_type()) ||
!all_reduce_is_summation) {
return MovableAllReduceContext{false,
{}};
}
CollectiveOpGroupMode all_reduce_group_mode =
GetCollectiveOpGroupMode(all_reduce->channel_id().has_value(),
all_reduce->use_global_device_ids())
.value();
auto is_value_replicated_within_replica_group =
[&cross_replica_replication_analysis,
&cross_partition_replication_analysis, &all_reduce_group_mode,
all_reduce](const HloInstruction& instruction,
const ShapeIndex& index) -> bool {
bool is_replicated = IsValueReplicatedWithinEachAllReduceGroup(
instruction, index, all_reduce_group_mode, all_reduce->replica_groups(),
all_reduce->GetModule()->config().replica_count(),
all_reduce->GetModule()->config().num_partitions(),
cross_replica_replication_analysis,
cross_partition_replication_analysis);
VLOG(5) << "instruction: " << instruction.name()
<< " is_replicate: " << is_replicated;
return is_replicated;
};
struct BufferTupleIndex {
bool unsupported_operation{false};
std::optional<int64_t> tuple_index;
bool returned_from_computation{false};
std::optional<HloInstruction*> dynamic_slice;
std::optional<HloInstruction*> dynamic_update_slice;
};
const bool is_reduce_scatter =
all_reduce->opcode() == HloOpcode::kReduceScatter;
auto get_origin_tuple_index =
[is_reduce_scatter](HloInstruction* instruction) -> BufferTupleIndex {
VLOG(4) << "get_origin_tuple_index called on " << instruction->ToString();
BufferTupleIndex result;
while (!result.unsupported_operation) {
switch (instruction->opcode()) {
default: {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
break;
}
case HloOpcode::kBitcast:
case HloOpcode::kConvert:
case HloOpcode::kReshape:
case HloOpcode::kTranspose:
if (is_reduce_scatter) {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
} else {
instruction = instruction->mutable_operand(0);
}
break;
case HloOpcode::kGetTupleElement: {
if (result.tuple_index.has_value()) {
result.unsupported_operation = true;
} else {
result.tuple_index =
Cast<HloGetTupleElementInstruction>(instruction)->tuple_index();
instruction = instruction->mutable_operand(0);
}
break;
}
case HloOpcode::kDynamicSlice: {
if (is_reduce_scatter) {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
} else if (result.dynamic_slice.has_value()) {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< "), we do not yet support more than 1 dynamic-slices on"
<< " the accumulation buffer.";
result.unsupported_operation = true;
} else {
result.dynamic_slice = instruction;
instruction = instruction->mutable_operand(0);
}
break;
}
case HloOpcode::kParameter: {
int parameter_number =
Cast<HloParameterInstruction>(instruction)->parameter_number();
CHECK_EQ(parameter_number, 0);
break;
}
}
if (instruction->opcode() == HloOpcode::kParameter) {
break;
}
}
return result;
};
auto get_output_tuple_index =
[is_reduce_scatter](HloInstruction* instruction,
HloComputation* while_body) -> BufferTupleIndex {
VLOG(4) << "get_output_tuple_index called on " << instruction->ToString();
BufferTupleIndex result;
std::stack<HloInstruction*> to_visit;
to_visit.push(instruction);
while (!to_visit.empty() && !result.unsupported_operation) {
HloInstruction* instruction = to_visit.top();
to_visit.pop();
for (HloInstruction* user : instruction->users()) {
switch (user->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kConvert:
case HloOpcode::kReshape:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTranspose:
case HloOpcode::kSlice: {
if (is_reduce_scatter) {
result.unsupported_operation = true;
} else {
to_visit.push(user);
}
break;
}
case HloOpcode::kDynamicUpdateSlice: {
if (result.dynamic_update_slice.has_value() || is_reduce_scatter) {
result.unsupported_operation = true;
} else {
result.dynamic_update_slice = user;
to_visit.push(user);
}
break;
}
case HloOpcode::kTuple: {
if (result.tuple_index.has_value()) {
result.unsupported_operation = true;
} else {
result.tuple_index = user->operand_index(instruction);
if (while_body->root_instruction() == user) {
if (result.returned_from_computation) {
result.unsupported_operation = true;
}
result.returned_from_computation = true;
} else {
to_visit.push(user);
}
}
break;
}
default: {
VLOG(4) << "get_output_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
}
}
if (result.unsupported_operation) {
break;
}
}
}
return result;
};
auto is_buffer_used =
[&is_value_replicated_within_replica_group, is_reduce_scatter](
absl::Span<const AccumulationContext> accumulation_contexts,
HloComputation* while_body_computation) -> bool {
CHECK_EQ(while_body_computation->num_parameters(), 1);
HloInstruction* parameter_instruction =
while_body_computation->parameter_instruction(0);
for (const auto& accumulation : accumulation_contexts) {
HloInstruction* accumulation_instruction =
accumulation.accumulation_instruction;
int64_t tuple_index = accumulation.param_tuple_index;
std::stack<HloInstruction*> to_visit;
for (HloInstruction* user : parameter_instruction->users()) {
if (auto* gte = DynCast<HloGetTupleElementInstruction>(user)) {
if (gte->tuple_index() == tuple_index) {
to_visit.push(user);
}
} else {
return true;
}
}
while (!to_visit.empty()) {
HloInstruction* instruction = to_visit.top();
to_visit.pop();
for (HloInstruction* user : instruction->users()) {
VLOG(5) << "is_buffer_used, user: " << user->name();
switch (user->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kConvert:
case HloOpcode::kReshape:
case HloOpcode::kTranspose:
if (is_reduce_scatter) {
VLOG(4) << "buffer is used by " << user->ToString()
<< ", preventing the motion of reduce-scatter.";
return true;
}
to_visit.push(user);
break;
case HloOpcode::kSelect: {
if (((user->operand_index(instruction) == 1 &&
IsZero(user->operand(2))) ||
(user->operand_index(instruction) == 2 &&
IsZero(user->operand(1)))) &&
is_value_replicated_within_replica_group(*(user->operand(0)),
{})) {
to_visit.push(user);
} else {
return true;
}
break;
}
case HloOpcode::kAdd: {
if (user != accumulation_instruction) {
return true;
}
break;
}
case HloOpcode::kDynamicSlice: {
if (!accumulation.dynamic_slice.has_value() ||
user != *accumulation.dynamic_slice) {
return true;
}
break;
}
case HloOpcode::kDynamicUpdateSlice: {
if (!accumulation.dynamic_update_slice.has_value() ||
user != *accumulation.dynamic_update_slice) {
return true;
}
break;
}
default: {
VLOG(4) << "buffer is used by " << user->ToString()
<< ", preventing the motion of all-reduce.";
return true;
}
}
}
}
}
return false;
};
auto dus_matches_ds_offsets =
[](const HloInstruction& dynamic_slice,
const HloInstruction& dynamic_update_slice) -> bool {
if (dynamic_slice.operand_count() + 1 !=
dynamic_update_slice.operand_count()) {
return false;
}
for (int i = 1; i < dynamic_slice.operand_count(); ++i) {
if (dynamic_slice.operand(i) != dynamic_update_slice.operand(i + 1)) {
return false;
}
}
return true;
};
auto dus_indices_are_replicated =
[&is_value_replicated_within_replica_group](
const HloInstruction& dynamic_update_slice) -> bool {
for (int i = 2; i < dynamic_update_slice.operand_count(); ++i) {
if (!is_value_replicated_within_replica_group(
*dynamic_update_slice.operand(i), {})) {
return false;
}
}
return true;
};
std::vector<AccumulationContext> accumulation_contexts;
std::stack<HloInstruction*> to_visit;
bool is_all_reduce_movable = true;
to_visit.push(all_reduce);
while (!to_visit.empty() && is_all_reduce_movable) {
HloInstruction* instruction = to_visit.top();
to_visit.pop();
for (HloInstruction* user : instruction->users()) {
switch (user->opcode()) {
case HloOpcode::kConvert:
to_visit.push(user);
break;
case HloOpcode::kBitcast:
case HloOpcode::kReshape:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTranspose:
case HloOpcode::kSlice: {
if (is_reduce_scatter) {
is_all_reduce_movable = false;
} else {
to_visit.push(user);
}
break;
}
case HloOpcode::kSelect: {
bool is_select_ok = [&]() {
bool operand_1_match = user->operand_index(instruction) == 1 &&
IsZero(user->operand(2));
bool operand_2_match = user->operand_index(instruction) == 2 &&
IsZero(user->operand(1));
if (!operand_1_match && !operand_2_match) {
return false;
}
if (!is_reduce_scatter) {
return true;
}
HloInstruction* predicate = user->mutable_operand(0);
return GetEffectiveScalar(predicate) != nullptr;
}();
if (is_select_ok) {
to_visit.push(user);
} else {
is_all_reduce_movable = false;
}
break;
}
case HloOpcode::kAdd: {
int64_t buffer_index = 1 - user->operand_index(instruction);
HloInstruction* accumulation_buffer =
user->mutable_operand(buffer_index);
auto origin_buffer_tuple_index =
get_origin_tuple_index(accumulation_buffer);
if (origin_buffer_tuple_index.unsupported_operation) {
is_all_reduce_movable = false;
break;
}
auto output_buffer_tuple_index =
get_output_tuple_index(user, while_body);
if (!output_buffer_tuple_index.unsupported_operation &&
output_buffer_tuple_index.returned_from_computation &&
origin_buffer_tuple_index.tuple_index.has_value() &&
output_buffer_tuple_index.tuple_index.has_value() &&
origin_buffer_tuple_index.tuple_index ==
output_buffer_tuple_index.tuple_index &&
(origin_buffer_tuple_index.dynamic_slice.has_value() ==
output_buffer_tuple_index.dynamic_update_slice.has_value()) &&
(!origin_buffer_tuple_index.dynamic_slice.has_value() ||
(dus_matches_ds_offsets(
**origin_buffer_tuple_index.dynamic_slice,
**output_buffer_tuple_index.dynamic_update_slice) &&
dus_indices_are_replicated(
**output_buffer_tuple_index.dynamic_update_slice)))) {
accumulation_contexts.push_back(AccumulationContext{
user, accumulation_buffer,
*output_buffer_tuple_index.tuple_index,
origin_buffer_tuple_index.dynamic_slice,
output_buffer_tuple_index.dynamic_update_slice});
} else {
is_all_reduce_movable = false;
}
break;
}
default: {
VLOG(4) << "get_accumulation_contexts, all-reduce result is used "
<< " by " << user->ToString() << ", not movable.";
is_all_reduce_movable = false;
}
}
}
}
if (is_buffer_used(accumulation_contexts, while_body)) {
is_all_reduce_movable = false;
}
return MovableAllReduceContext{is_all_reduce_movable, accumulation_contexts};
}
// The rewritten while-init tuple together with a map from tuple index to the
// original accumulation buffer that was replaced.
struct WhileInitContext {
HloInstruction* while_init{nullptr};
absl::flat_hash_map<int, HloInstruction*> tuple_index_to_old_buffer;
};
// Replaces each accumulation buffer in the while-init with a zero-initialized
// buffer of the accumulation shape and records the original buffers so they
// can be combined with the hoisted collective's result after the loop.
WhileInitContext CreateNewWhileInit(
HloInstruction* old_while_instruction,
const HloInstructionMap<std::vector<AccumulationContext>>&
all_reduce_to_accumulations) {
HloInstruction* old_while_init = old_while_instruction->mutable_operand(0);
HloComputation* while_parent = old_while_instruction->parent();
std::vector<HloInstruction*> new_while_init_elements(
old_while_init->operand_count(), nullptr);
for (const auto& all_reduce_and_accumulations_pair :
all_reduce_to_accumulations) {
const std::vector<AccumulationContext>& accumulations =
all_reduce_and_accumulations_pair.second;
HloInstruction* loop_all_reduce = all_reduce_and_accumulations_pair.first;
for (auto& accumulation_context : accumulations) {
int64_t tuple_index = accumulation_context.param_tuple_index;
HloInstruction* old_buffer = old_while_init->mutable_operand(tuple_index);
const Shape& accumulation_shape =
loop_all_reduce->opcode() == HloOpcode::kAllReduce
? old_buffer->shape()
: loop_all_reduce->operand(0)->shape();
HloInstruction* new_buffer = while_parent->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromDimensions(
accumulation_shape.element_type(),
accumulation_shape.dimensions())));
new_while_init_elements[tuple_index] = new_buffer;
}
}
absl::flat_hash_map<int, HloInstruction*> tuple_index_to_old_buffer;
for (int i = 0; i < old_while_init->operand_count(); i++) {
if (!new_while_init_elements[i]) {
new_while_init_elements[i] = old_while_init->mutable_operand(i);
} else {
tuple_index_to_old_buffer[i] = old_while_init->mutable_operand(i);
}
}
HloInstruction* new_while_init = while_parent->AddInstruction(
HloInstruction::CreateTuple(new_while_init_elements));
return WhileInitContext{new_while_init, tuple_index_to_old_buffer};
}
// When a reduce-scatter is hoisted, the loop accumulates the unreduced operand
// instead, so the affected tuple element of the body parameter, its
// get-tuple-elements, and the accumulating adds are rewritten to the larger
// pre-scatter shape.
absl::Status ChangeAccumulatorShapesInLoopBodies(
HloInstruction* old_while_instruction,
const HloInstructionMap<std::vector<AccumulationContext>>&
all_reduce_to_accumulations) {
HloComputation* body = old_while_instruction->while_body();
HloComputation* cond = old_while_instruction->while_condition();
absl::flat_hash_map<Shape, HloInstruction*> zeros;
auto create_zero_of_shape = [&zeros, body](const Shape& shape) {
auto it = zeros.find(shape);
if (it != zeros.end()) {
return it->second;
}
HloInstruction* zero = body->AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(shape)));
zeros[shape] = zero;
return zero;
};
for (const auto& [loop_reduce_scatter, accumulations] :
all_reduce_to_accumulations) {
if (loop_reduce_scatter->opcode() != HloOpcode::kReduceScatter) {
continue;
}
const Shape& accumulation_shape = loop_reduce_scatter->operand(0)->shape();
for (auto& accumulation_context : accumulations) {
const int64_t tuple_index = accumulation_context.param_tuple_index;
HloInstruction* param_body = body->parameter_instruction(0);
std::vector<Shape> element_shapes = param_body->shape().tuple_shapes();
element_shapes[tuple_index] = accumulation_shape;
*param_body->mutable_shape() = ShapeUtil::MakeTupleShape(element_shapes);
for (HloInstruction* user : param_body->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
HloGetTupleElementInstruction* gte =
Cast<HloGetTupleElementInstruction>(user);
if (gte->tuple_index() != tuple_index) {
continue;
}
*gte->mutable_shape() = accumulation_shape;
for (HloInstruction* gte_user : gte->users()) {
CHECK_EQ(gte_user->opcode(), HloOpcode::kAdd);
*gte_user->mutable_shape() = accumulation_shape;
}
}
std::vector<HloInstruction*> reduce_scatter_users =
loop_reduce_scatter->users(); | #include "xla/service/while_loop_all_reduce_code_motion.h"
#include <algorithm>
#include <array>
#include <iterator>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ::testing::Ne;
using ::testing::NotNull;
using ::testing::Property;
using ::testing::SizeIs;
class WhileLoopAllReduceCodeMotionTest : public HloTestBase {
public:
template <HloOpcode op>
HloInstruction* find_op(HloComputation* computation) {
return *std::find_if(computation->instructions().begin(),
computation->instructions().end(),
HloPredicateIsOp<op>);
}
};
TEST_F(WhileLoopAllReduceCodeMotionTest, AllReduceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
TF_ASSERT_OK(
HloVerifier(false, true)
.Run(module.get())
.status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
HloAllReduceInstruction* moved_all_reduce =
DynCast<HloAllReduceInstruction>(find_op<HloOpcode::kAllReduce>(entry));
ASSERT_THAT(moved_all_reduce, NotNull());
EXPECT_THAT(moved_all_reduce->operand(0), op::GetTupleElement());
EXPECT_EQ(DynCast<HloGetTupleElementInstruction>(
moved_all_reduce->mutable_operand(0))
->tuple_index(),
3);
EXPECT_THAT(moved_all_reduce, op::ReplicaGroups({{0, 1, 2, 3}}));
EXPECT_FALSE(moved_all_reduce->constrain_layout());
EXPECT_TRUE(moved_all_reduce->use_global_device_ids());
HloComputation* reduction_computation =
module->GetComputationWithName("reduction");
ASSERT_THAT(reduction_computation, NotNull());
EXPECT_EQ(moved_all_reduce->to_apply(), reduction_computation);
}
TEST_F(WhileLoopAllReduceCodeMotionTest, ReduceScatterAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[4096, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%reduce-scatter = f32[1024, 1024] reduce-scatter(f32[4096, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction, dimensions={0}
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %reduce-scatter, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[4096, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[4096, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopAllReduceCodeMotion{true}.Run(
module.get()));
ASSERT_TRUE(simplified_loop);
TF_ASSERT_OK(
HloVerifier(false, true)
.Run(module.get())
.status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::ReduceScatter())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
EXPECT_THAT(accumulation_buffer, op::Shape("f32[4096, 1024]"));
auto* moved_reduce_scatter = DynCast<HloReduceScatterInstruction>(
find_op<HloOpcode::kReduceScatter>(entry));
ASSERT_THAT(moved_reduce_scatter, NotNull());
EXPECT_THAT(moved_reduce_scatter->operand(0), op::GetTupleElement());
EXPECT_EQ(DynCast<HloGetTupleElementInstruction>(
moved_reduce_scatter->mutable_operand(0))
->tuple_index(),
3);
EXPECT_THAT(moved_reduce_scatter, op::ReplicaGroups({{0, 1, 2, 3}}));
EXPECT_FALSE(moved_reduce_scatter->constrain_layout());
EXPECT_TRUE(moved_reduce_scatter->use_global_device_ids());
HloComputation* reduction_computation =
module->GetComputationWithName("reduction");
ASSERT_THAT(reduction_computation, NotNull());
EXPECT_EQ(moved_reduce_scatter->to_apply(), reduction_computation);
}
TEST_F(WhileLoopAllReduceCodeMotionTest,
ReduceScatterAccumulateDisabledByDefault) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[4096, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%reduce-scatter = f32[1024, 1024] reduce-scatter(f32[4096, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction, dimensions={0}
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %reduce-scatter, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[4096, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[4096, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
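// A single all-reduce feeds three accumulation buffers through slice+reshape;
// after the transform the loop body has no all-reduce and the entry
// computation contains three hoisted all-reduces with distinct channel ids.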
TEST_F(WhileLoopAllReduceCodeMotionTest, AllReduceSliceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[3, 1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024, 1024] get-tuple-element(%param), index=4
%gte.5 = f32[1024, 1024] get-tuple-element(%param), index=5
%all-reduce = f32[3, 1024, 1024] all-reduce(f32[3, 1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%slice.0 = f32[1, 1024, 1024] slice(f32[3, 1024, 1024] %all-reduce), slice={[0:1], [0:1024], [0:1024]}
%reshape.0 = f32[1024, 1024] reshape(f32[1, 1024, 1024] %slice.0)
%slice.1 = f32[1, 1024, 1024] slice(f32[3, 1024, 1024] %all-reduce), slice={[1:2], [0:1024], [0:1024]}
%reshape.1 = f32[1024, 1024] reshape(f32[1, 1024, 1024] %slice.1)
%slice.2 = f32[1, 1024, 1024] slice(f32[3, 1024, 1024] %all-reduce), slice={[2:3], [0:1024], [0:1024]}
%reshape.2 = f32[1024, 1024] reshape(f32[1, 1024, 1024] %slice.2)
%accumulation.0 = f32[1024, 1024] add(f32[1024, 1024] %reshape.0, f32[1024, 1024] %gte.3)
%accumulation.1 = f32[1024, 1024] add(f32[1024, 1024] %reshape.1, f32[1024, 1024] %gte.4)
%accumulation.2 = f32[1024, 1024] add(f32[1024, 1024] %reshape.2, f32[1024, 1024] %gte.5)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation.0, %accumulation.1, %accumulation.2)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[3, 1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer.0 = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%accumulation_buffer.1 = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%accumulation_buffer.2 = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[3, 1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer.0, f32[1024, 1024] %accumulation_buffer.1, f32[1024, 1024] %accumulation_buffer.2)
ROOT %while = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
TF_ASSERT_OK(
      HloVerifier(/*layout_sensitive=*/false, /*allow_mixed_precision=*/true)
.Run(module.get())
.status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
std::vector<HloInstruction*> hoisted_all_reduces;
absl::c_copy_if(module->entry_computation()->instructions(),
std::back_inserter(hoisted_all_reduces),
HloPredicateIsOp<HloOpcode::kAllReduce>);
EXPECT_THAT(hoisted_all_reduces, SizeIs(3));
ASSERT_THAT(
hoisted_all_reduces,
Each(Pointee(Property(&HloInstruction::channel_id, Ne(std::nullopt)))));
absl::flat_hash_set<int> unique_channel_ids = {
hoisted_all_reduces[0]->channel_id().value(),
hoisted_all_reduces[1]->channel_id().value(),
hoisted_all_reduces[2]->channel_id().value()};
EXPECT_THAT(unique_channel_ids, SizeIs(3));
}
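// The accumulated value is consumed outside the loop; after hoisting, that use
// must read from a repacked result tuple whose accumulation element is the new
// Add of the hoisted all-reduce and the old buffer.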
TEST_F(WhileLoopAllReduceCodeMotionTest, AllReduceAccumulateUse) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
%gte_while = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
ROOT %multiply = f32[1024, 1024] multiply(f32[1024, 1024] %gte_while, f32[1024, 1024] %param.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
TF_ASSERT_OK(
      HloVerifier(/*layout_sensitive=*/false, /*allow_mixed_precision=*/true)
.Run(module.get())
.status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* new_root = module->entry_computation()->root_instruction();
ASSERT_THAT(new_root, op::Multiply());
ASSERT_THAT(new_root->operand(0), op::GetTupleElement());
ASSERT_THAT(new_root->operand(0)->operand(0), op::Tuple());
EXPECT_THAT(new_root->operand(0)->operand(0)->operand(3), op::Add());
}
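// The all-reduce result is folded into the accumulation twice per iteration,
// which the pass does not support, so the loop must not be transformed.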
TEST_F(WhileLoopAllReduceCodeMotionTest, RepeatedlyAccumulatedAllReduce) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %gte.3)
%add.0 = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %accumulation)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %add.0)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
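// The buffer is converted to bf16 before the all-reduce and back to f32 for
// accumulation; the hoisted all-reduce must stay bf16 while the add that
// applies the accumulated delta to the old buffer is f32.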
TEST_F(WhileLoopAllReduceCodeMotionTest, TypeCastAllReduceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%convert.0 = bf16[1024, 1024] convert(f32[1024, 1024] %gte.2)
%all-reduce = bf16[1024, 1024] all-reduce(bf16[1024, 1024] %convert.0), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%convert.1 = f32[1024, 1024] convert(bf16[1024, 1024] %all-reduce)
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %convert.1, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
TF_ASSERT_OK(
      HloVerifier(/*layout_sensitive=*/false, /*allow_mixed_precision=*/true)
.Run(module.get())
.status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
HloAllReduceInstruction* moved_all_reduce =
DynCast<HloAllReduceInstruction>(find_op<HloOpcode::kAllReduce>(entry));
EXPECT_THAT(moved_all_reduce, op::Shape("bf16[1024, 1024]"));
HloInstruction* add_delta_to_old_buffer = find_op<HloOpcode::kAdd>(entry);
ASSERT_THAT(add_delta_to_old_buffer, NotNull());
EXPECT_THAT(add_delta_to_old_buffer, op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(0),
op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(1),
op::Shape("f32[1024, 1024]"));
}
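// A select between zeros and the all-reduce result feeds the accumulation;
// the all-reduce should still be hoisted out of the loop.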
TEST_F(WhileLoopAllReduceCodeMotionTest, SelectAllReduceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024,1024] all-reduce(%gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%const.0 = f32[] constant(0)
%zeros = f32[1024,1024] broadcast(%const.0), dimensions={}
%predicates = pred[1024,1024] custom-call(), custom_call_target="something"
%select = f32[1024,1024] select(%predicates, %zeros, %all-reduce)
%accumulation = f32[1024,1024] add(%select, %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024,1024], f32[1024,1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024,1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024,1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024,1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
TF_ASSERT_OK(
      HloVerifier(/*layout_sensitive=*/false, /*allow_mixed_precision=*/true)
.Run(module.get())
.status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
HloAllReduceInstruction* moved_all_reduce =
DynCast<HloAllReduceInstruction>(find_op<HloOpcode::kAllReduce>(entry));
EXPECT_THAT(moved_all_reduce, op::Shape("f32[1024,1024]"));
HloInstruction* add_delta_to_old_buffer = find_op<HloOpcode::kAdd>(entry);
ASSERT_THAT(add_delta_to_old_buffer, NotNull());
EXPECT_THAT(add_delta_to_old_buffer, op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(0),
op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(1),
op::Shape("f32[1024, 1024]"));
}
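// Same select pattern but with reduce-scatter (enabled via the constructor
// flag) and a predicate broadcast from a scalar custom-call result.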
TEST_F(WhileLoopAllReduceCodeMotionTest, SelectReduceScatterAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024,4096], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024,4096], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024,4096] get-tuple-element(%param), index=2
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%reduce-scatter = f32[1024,1024] reduce-scatter(%gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction, dimensions={1}
%const.0 = f32[] constant(0)
%zeros = f32[1024,1024] broadcast(%const.0), dimensions={}
%scalarp = pred[] custom-call(), custom_call_target="something"
%predicates = pred[1024,1024] broadcast(%scalarp), dimensions={}
%select = f32[1024,1024] select(%predicates, %zeros, %reduce-scatter)
%accumulation = f32[1024,1024] add(%select, %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024,4096], f32[1024,1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024,4096] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024,1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 4096], f32[1024,1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 4096] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 4096], f32[1024,1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
      WhileLoopAllReduceCodeMotion{/*enable_reduce_scatter=*/true}.Run(
module.get()));
ASSERT_TRUE(simplified_loop);
TF_ASSERT_OK(
      HloVerifier(/*layout_sensitive=*/false, /*allow_mixed_precision=*/true)
.Run(module.get())
.status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::ReduceScatter())));
HloInstruction* accumulation_b |
1,882 | cpp | tensorflow/tensorflow | hlo_dataflow_analysis | third_party/xla/xla/service/hlo_dataflow_analysis.cc | third_party/xla/xla/service/hlo_dataflow_analysis_test.cc | #ifndef XLA_SERVICE_HLO_DATAFLOW_ANALYSIS_H_
#define XLA_SERVICE_HLO_DATAFLOW_ANALYSIS_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_phi_graph.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
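// Identifies one operand of an instruction: the operand number plus a shape
// index into that operand. Hashable and comparable so it can be used as a map
// key.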
struct HloOperandIndex {
using MyTuple = std::tuple<int64_t, const ShapeIndex&>;
template <typename H>
friend H AbslHashValue(H h, const HloOperandIndex& hlo_operand_index) {
return H::combine(std::move(h), hlo_operand_index.ToTuple());
}
friend bool operator==(const HloOperandIndex& lhs,
const HloOperandIndex& rhs) {
return lhs.ToTuple() == rhs.ToTuple();
}
bool operator!=(const HloOperandIndex& other) const {
return !(*this == other);
}
MyTuple ToTuple() const {
return std::make_tuple(operand_number, std::cref(operand_index));
}
int64_t operand_number;
ShapeIndex operand_index;
};
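// Analysis that identifies every HLO value in a module, the positions where
// each value appears, and the uses of each value, and that answers
// buffer-sharing queries such as CanShareOperandBufferWithUser.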
class HloDataflowAnalysis {
public:
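  // Backend-specific hook: given an instruction, one of its operands, and the
  // output index being considered, returns whether the operand's buffer may be
  // shared with that output, or std::nullopt to fall back to the default rule.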
using CanShareBuffer = std::function<std::optional<bool>(
const HloInstruction* instr, const HloInstruction* operand,
const ShapeIndex& user_index)>;
struct ForwardedOperand {
int64_t operand_number;
ShapeIndex operand_index;
};
using ForwardsValue = std::function<std::optional<ForwardedOperand>(
const HloInstruction* instr, const ShapeIndex& index)>;
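  // Runs the analysis on `module`. With `ssa_form` set, phi values are created
  // at merge points (e.g. while loops) so every value has a single definition;
  // with `bitcast_defines_value` set, a bitcast defines a new value instead of
  // forwarding its operand's value.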
static absl::StatusOr<std::unique_ptr<HloDataflowAnalysis>> Run(
const HloModule& module, bool ssa_form = false,
bool bitcast_defines_value = false,
const CanShareBuffer& can_share_buffer = nullptr,
const ForwardsValue& forwards_value = nullptr,
absl::flat_hash_set<absl::string_view> execution_threads = {});
bool ValueIsDefinedAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const;
const HloValue& GetValueDefinedAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const;
HloValue& GetValueDefinedAt(const HloInstruction* instruction,
const ShapeIndex& index = {});
const InstructionValueSet& GetInstructionValueSet(
const HloInstruction* instruction) const;
InstructionValueSet& GetInstructionValueSet(
const HloInstruction* instruction);
HloValueSet GetFlattenedValueSet(const HloInstruction* instruction) const;
const HloValueSet& GetValueSet(const HloInstruction* instruction,
const ShapeIndex& index = {}) const;
const HloValueSet& GetValueSet(const HloPosition& position) const;
HloValueSet& GetValueSet(const HloPosition& position);
HloValueSet& GetValueSet(const HloInstruction* instruction,
const ShapeIndex& index = {});
const HloValue& GetUniqueValueAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
return GetValueSet(instruction, index).GetUniqueValue();
}
HloValue& GetUniqueValueAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) {
return GetValue(GetValueSet(instruction, index).GetUniqueValue().id());
}
const HloValue& GetValue(HloValue::Id value_id) const;
HloValue& GetValue(HloValue::Id value_id);
int64_t value_count() const { return values_.size(); }
const std::vector<HloValue*>& values() const { return values_vector_; }
const CallGraph& call_graph() const { return *call_graph_; }
std::string ToString() const;
bool DoesNotUseOperandBuffer(const HloInstruction* operand,
const ShapeIndex& index,
const HloInstruction* user) const;
bool CanShareOperandBufferWithUser(HloInstruction* operand,
const ShapeIndex& operand_index,
HloInstruction* user,
const ShapeIndex& user_index) const;
const HloModule& module() const { return module_; }
static bool IsInPlaceOperation(HloOpcode opcode);
static bool IsAsynchronousOperationStart(HloOpcode opcode);
static bool IsAsynchronousOperationDone(HloOpcode opcode);
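  // Returns the (operand index, output shape index) pairs that alias in place
  // for `instruction`, i.e. outputs that reuse an operand's buffer.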
static std::vector<std::pair<HloOperandIndex, ShapeIndex>>
GetInPlaceInputOutputPairs(const HloInstruction* instruction);
absl::Status Verify() const;
private:
static bool AreTransitiveUsesElementwiseOrTuple(const HloInstruction* inst);
HloDataflowAnalysis(const HloModule& module, bool ssa_form,
bool bitcast_defines_value,
const CanShareBuffer& can_share_buffer,
const ForwardsValue& forwards_value,
absl::flat_hash_set<absl::string_view> execution_threads);
void OptimizePhiValues();
HloValue* NewHloValue(HloInstruction* instruction, const ShapeIndex& index,
bool is_phi);
void MarkValueForDeletion(HloValue::Id value_id);
void DeleteMarkedValues();
absl::Status InitializeInstructionValueSets();
bool UpdateInstructionValueSet(HloInstruction* instruction);
bool UpdateBitcastValueSet(HloInstruction* bitcast);
bool UpdateCallValueSet(HloInstruction* call);
bool UpdateConditionalValueSet(HloInstruction* conditional);
bool UpdateCopyValueSet(HloInstruction* copy);
bool UpdateCustomCallValueSet(HloInstruction* custom_call);
bool UpdateDomainValueSet(HloInstruction* domain);
bool UpdateGetTupleElementValueSet(HloInstruction* gte);
bool UpdateParameterValueSet(HloInstruction* parameter);
bool UpdateAsyncStartValueSet(HloInstruction* async_start);
bool UpdateAsyncUpdateValueSet(HloInstruction* async_update);
bool UpdateAsyncDoneValueSet(HloInstruction* async_done);
bool UpdateCopyStartValueSet(HloInstruction* copy_start);
bool UpdateCopyDoneValueSet(HloInstruction* copy_done);
bool UpdateOptimizationBarrierValueSet(HloInstruction* barrier);
bool UpdateRecvDoneValueSet(HloInstruction* recv_done);
bool UpdateSendValueSet(HloInstruction* send);
bool UpdateTupleValueSet(HloInstruction* tuple);
bool UpdateWhileValueSet(HloInstruction* xla_while);
bool UpdateAddDependencyValueSet(HloInstruction* add_dependency);
bool UpdateAllGatherStartValueSet(HloInstruction* all_gather_start);
bool UpdateAllGatherDoneValueSet(HloInstruction* all_gather_done);
bool UpdateAllReduceDoneValueSet(HloInstruction* all_reduce_done);
bool UpdateCollectivePermuteStartValueSet(
HloInstruction* collective_permute_start);
bool UpdateCollectivePermuteDoneValueSet(
HloInstruction* collective_permute_done);
void Propagate();
bool Phi(HloInstruction* instruction,
absl::Span<const InstructionValueSet* const> inputs);
void UpdatePositionsOfValuesAt(
HloInstruction* instruction, const InstructionValueSet& new_value_set,
const InstructionValueSet* prev_value_set = nullptr);
const HloModule& module_;
const absl::flat_hash_set<absl::string_view> execution_threads_;
const bool ssa_form_;
const bool bitcast_defines_value_;
std::unique_ptr<CallGraph> call_graph_;
absl::flat_hash_map<HloValue::Id, std::unique_ptr<HloValue>> values_;
absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<InstructionValueSet>>
value_sets_;
std::vector<HloValue::Id> value_ids_to_delete_;
std::vector<HloValue*> values_vector_;
HloValue::Id next_value_id_ = 0;
PhiGraph phi_graph_;
CanShareBuffer can_share_buffer_ = nullptr;
ForwardsValue forwards_value_ = nullptr;
};
std::pair<const HloInstruction*, ShapeIndex> FollowTupleIndirection(
const HloInstruction* instruction, ShapeIndex operand_index);
}
#endif
#include "xla/service/hlo_dataflow_analysis.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
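// Assigns each instruction a post-order ordinal, recursing into the called
// computations of kCall/kConditional and into while conditions and bodies, so
// that callee instructions receive smaller ordinals than their callers.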
int64_t CalculatePostOrderScheduleHelper(
const HloComputation* comp, int64_t start_ordinal,
absl::flat_hash_map<HloInstruction*, int64_t>* ordinal_map) {
int64_t ordinal = start_ordinal;
for (HloInstruction* instruction : comp->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kCall ||
instruction->opcode() == HloOpcode::kConditional) {
for (const HloComputation* called_computation :
instruction->called_computations()) {
ordinal = CalculatePostOrderScheduleHelper(called_computation, ordinal,
ordinal_map);
}
}
if (instruction->opcode() == HloOpcode::kWhile) {
ordinal = CalculatePostOrderScheduleHelper(instruction->while_condition(),
ordinal, ordinal_map);
ordinal = CalculatePostOrderScheduleHelper(instruction->while_body(),
ordinal, ordinal_map);
}
ordinal_map->insert({instruction, ordinal++});
}
return ordinal;
}
absl::flat_hash_map<HloInstruction*, int64_t> CalculatePostOrderSchedule(
const HloModule& module) {
absl::flat_hash_map<HloInstruction*, int64_t> map;
CalculatePostOrderScheduleHelper(module.entry_computation(), 0, &map);
return map;
}
}
using absl::StrAppend;
using absl::StrCat;
HloDataflowAnalysis::HloDataflowAnalysis(
const HloModule& module, bool ssa_form, bool bitcast_defines_value,
const CanShareBuffer& can_share_buffer, const ForwardsValue& forwards_value,
absl::flat_hash_set<absl::string_view> execution_threads)
: module_(module),
execution_threads_(std::move(execution_threads)),
ssa_form_(ssa_form),
bitcast_defines_value_(bitcast_defines_value),
call_graph_(CallGraph::Build(&module)),
can_share_buffer_(can_share_buffer),
forwards_value_(forwards_value) {}
bool HloDataflowAnalysis::AreTransitiveUsesElementwiseOrTuple(
const HloInstruction* inst) {
absl::flat_hash_set<const HloInstruction*> visited;
absl::InlinedVector<const HloInstruction*, 4> stack;
stack.push_back(inst);
while (!stack.empty()) {
const HloInstruction* current = stack.back();
stack.pop_back();
visited.insert(current);
for (const HloInstruction* user : current->users()) {
for (const int64_t use_index : user->OperandIndices(current)) {
if (!user->IsElementwiseOnOperand(use_index) &&
user->opcode() != HloOpcode::kTuple) {
return false;
}
}
if (!visited.contains(user)) {
stack.push_back(user);
}
}
}
return true;
}
namespace {
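// True if `instr` is a rank-1 slice with unit stride.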
bool Is1dSliceWithoutStrides(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kSlice &&
1 == instr->slice_starts().size() &&
1 == instr->slice_limits().size() &&
1 == instr->slice_strides().size() &&
1 == instr->slice_strides().at(0);
}
bool IsSliceInputFusion(const HloInstruction& unnested_hlo) {
if (!unnested_hlo.IsInputFusion()) {
return false;
}
const HloInstruction* root = unnested_hlo.fused_expression_root();
if (root->opcode() != HloOpcode::kTuple) {
return false;
}
return absl::c_all_of(root->operands(), [](const HloInstruction* instr) {
return Is1dSliceWithoutStrides(instr);
});
}
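// Tracks how a concatenate has been used so far while checking that a
// parameter's transitive uses stay effectively elementwise: the concat seen,
// the operand index through which it was reached, and the slice that recovers
// that operand.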
struct ConcatUsageInfo {
const HloInstruction* prev_concat;
int64_t concat_opnd_idx;
const HloInstruction* slice_to_recover_opnd;
};
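// Checks that `concat`'s users are disjoint unit-stride slices that exactly
// tile the concatenated operands (one slice per operand, in order). If so,
// returns updated usage info naming the slice that recovers `operand`;
// otherwise returns nullopt.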
std::optional<ConcatUsageInfo> ConcatIsEffectivelyElementwise(
const HloInstruction& concat, const HloInstruction& operand,
const ConcatUsageInfo& info) {
std::vector<HloInstruction*> users = concat.users();
if (!absl::c_all_of(users, Is1dSliceWithoutStrides)) {
return std::optional<ConcatUsageInfo>();
}
if (users.size() != concat.operand_count() ||
concat.operand_count() != concat.unique_operands().size()) {
return std::optional<ConcatUsageInfo>();
}
absl::c_sort(users, [](const HloInstruction* a, const HloInstruction* b) {
return a->slice_starts().at(0) < b->slice_starts().at(0);
});
int64_t prev_limit = 0;
for (int64_t i = 0; i < users.size(); ++i) {
const HloInstruction* u = users[i];
int64_t slice_size = u->slice_limits().at(0) - u->slice_starts().at(0);
if (u->slice_starts().at(0) != prev_limit ||
slice_size != ShapeUtil::ElementsIn(concat.operand(i)->shape())) {
return std::optional<ConcatUsageInfo>();
}
prev_limit = u->slice_limits().at(0);
}
int64_t operand_idx = concat.operand_index(&operand);
if (info.prev_concat != nullptr) {
bool is_concat_identical = info.prev_concat->Identical(
concat,
[](const HloInstruction*, const HloInstruction*) {
return true;
});
if (!is_concat_identical || info.concat_opnd_idx != operand_idx) {
return std::optional<ConcatUsageInfo>();
}
}
const HloInstruction* slice_to_recover_opnd = users.at(operand_idx);
return std::optional<ConcatUsageInfo>(
ConcatUsageInfo{&concat, operand_idx, slice_to_recover_opnd});
}
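// Walks the transitive uses of `param` and returns true only if they are all
// effectively elementwise (bitcast reshapes, tiling concat/slice pairs, or
// elementwise ops with matching layouts) and eventually reach `root_tuple` at
// `out_shape_idx`.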
bool AreTransitiveUsesEffectivelyElementwise(const HloInstruction* param,
const HloInstruction* root_tuple,
const ShapeIndex& out_shape_idx) {
CHECK_EQ(root_tuple->opcode(), HloOpcode::kTuple);
CHECK_EQ(out_shape_idx.size(), 1);
absl::flat_hash_set<const HloInstruction*> visited;
absl::InlinedVector<const HloInstruction*, 4> stack;
stack.push_back(param);
ConcatUsageInfo concat_usage_info{nullptr, 0, nullptr};
bool is_output_reachable = false;
while (!stack.empty()) {
const HloInstruction* current = stack.back();
stack.pop_back();
visited.insert(current);
for (const HloInstruction* user : current->users()) {
VLOG(3) << "Visiting: " << user->ToString();
switch (user->opcode()) {
case HloOpcode::kTuple:
if (user == root_tuple &&
current == root_tuple->operand(out_shape_idx.back())) {
is_output_reachable = true;
}
break;
case HloOpcode::kReshape:
if (!ShapeUtil::ReshapeIsBitcast(current->shape(), user->shape())) {
return false;
}
break;
case HloOpcode::kConcatenate: {
std::optional<ConcatUsageInfo> optional_concat_info =
ConcatIsEffectivelyElementwise(*user, *current,
concat_usage_info);
if (!optional_concat_info) {
return false;
}
concat_usage_info = *optional_concat_info;
CHECK(!visited.contains(concat_usage_info.slice_to_recover_opnd));
stack.push_back(concat_usage_info.slice_to_recover_opnd);
continue;
}
default:
for (const int64_t use_index : user->OperandIndices(current)) {
if (!user->IsElementwiseOnOperand(use_index)) {
return false;
}
}
if (!LayoutUtil::Equal(current->shape().layout(),
user->shape().layout())) {
return false;
}
break;
}
if (!visited.contains(user)) {
stack.push_back(user);
}
}
}
return is_output_reachable;
}
}
bool HloDataflowAnalysis::ValueIsDefinedAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
const HloValueSet& value_set = GetValueSet(instruction, index);
if (value_set.values().size() != 1) {
return false;
}
return value_set.GetUniqueValue().defining_instruction() == instruction;
}
const HloValue& HloDataflowAnalysis::GetValueDefinedAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
CHECK(ValueIsDefinedAt(instruction, index)) << instruction->ToString();
return GetUniqueValueAt(instruction, index);
}
HloValue& HloDataflowAnalysis::GetValueDefinedAt(
const HloInstruction* instruction, const ShapeIndex& index) {
CHECK(ValueIsDefinedAt(instruction, index));
return GetUniqueValueAt(instruction, index);
}
HloValue* HloDataflowAnalysis::NewHloValue(HloInstruction* instruction,
const ShapeIndex& index,
bool is_phi) {
const int64_t value_id = next_value_id_++;
auto result =
values_.insert({value_id, std::make_unique<HloValue>(
value_id, instruction, index, is_phi)});
CHECK(result.second);
VLOG(4) << "NewHloValue = " << result.first->second->ToShortString();
return result.first->second.get();
}
void HloDataflowAnalysis::MarkValueForDeletion(HloValue::Id value_id) {
const HloValue& value = GetValue(value_id);
VLOG(4) << "MarkValueForDeletion(" << value.ToShortString() << ")";
value_ids_to_delete_.push_back(value_id);
}
void HloDataflowAnalysis::DeleteMarkedValues() {
absl::flat_hash_set<HloValue::Id> id_set(value_ids_to_delete_.begin(),
value_ids_to_delete_.end());
#ifndef NDEBUG
for (const auto& pair : value_sets_) {
const HloInstruction* instruction = pair.first;
const InstructionValueSet& instruction_value_set = *pair.second;
for (const auto& index_value_set : instruction_value_set) {
const HloValueSet& value_set = index_value_set.second;
for (const HloValue* value : value_set.values()) {
DCHECK(!ContainsKey(id_set, value->id()))
<< "Value " << value->ToShortString()
<< " marked for deletion, but still exists in value set for "
"instruction "
<< instruction->name();
}
}
}
#endif
for (HloValue::Id value_id : id_set) {
values_.erase(value_id);
}
value_ids_to_delete_.clear();
}
std::string HloDataflowAnalysis::ToString() const {
std::string out =
StrCat("HloDataflowAnalysis, module ", module_.name(), "\n");
StrAppend(&out, " Instruction value sets:\n");
for (const HloComputation* computation : module_.computations()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(), | #include "xla/service/hlo_dataflow_analysis.h"
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
class HloDataflowAnalysisTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
protected:
HloDataflowAnalysisTest() : module_(CreateNewVerifiedModule()) {}
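  // Optionally runs DCE, flattens the call graph, then runs the dataflow
  // analysis on module_ and caches the result in analysis_.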
const HloDataflowAnalysis& RunAnalysis(bool ssa_form,
bool bitcast_defines_value = false,
bool run_dce = true) {
if (run_dce) {
HloDCE dce;
EXPECT_TRUE(dce.Run(module_.get()).ok());
}
FlattenCallGraph flatten;
EXPECT_TRUE(flatten.Run(module_.get()).ok());
analysis_ =
HloDataflowAnalysis::Run(*module_, ssa_form, bitcast_defines_value)
.value();
return *analysis_;
}
const std::vector<const HloValue*>& HloValuesAt(
const HloInstruction* instruction, const ShapeIndex& index = {}) {
CHECK(analysis_ != nullptr);
return analysis_->GetValueSet(instruction, index).values();
}
bool InstructionsMayInterfere(const HloOrdering& ordering,
const HloInstruction* a,
const HloInstruction* b) {
EXPECT_FALSE(a->shape().IsTuple());
EXPECT_FALSE(b->shape().IsTuple());
return ordering.MayInterfere(analysis_->GetValueDefinedAt(a),
analysis_->GetValueDefinedAt(b), *analysis_);
}
std::unique_ptr<HloComputation> CreateR0F32UnaryOpComputation(
HloOpcode opcode) {
HloComputation::Builder builder(
absl::StrCat(TestName(), ".", HloOpcodeString(opcode)));
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, opcode, param0));
return builder.Build();
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloDataflowAnalysis> analysis_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
const Shape vector_shape_ = ShapeUtil::MakeShape(F32, {42});
const Shape tuple_shape_ = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})});
};
TEST_P(HloDataflowAnalysisTest, BinaryOperation) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, constant1, constant2));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).positions(),
UnorderedElementsAre(HloPosition{constant1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).positions(),
UnorderedElementsAre(HloPosition{constant2, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(add).positions(),
UnorderedElementsAre(HloPosition{add, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{add, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).GetUses(),
UnorderedElementsAre(HloUse{add, 1, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).GetUses().empty());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
TEST_P(HloDataflowAnalysisTest, TupleAndGtes) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(scalar_shape_, HloOpcode::kAdd, gte0, gte1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 4);
EXPECT_TRUE(analysis.ValueIsDefinedAt(param0));
EXPECT_TRUE(analysis.ValueIsDefinedAt(param1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(gte0));
EXPECT_FALSE(analysis.ValueIsDefinedAt(gte1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
EXPECT_THAT(
analysis.GetValueDefinedAt(param0).positions(),
UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
HloPosition{gte0, {}}));
EXPECT_THAT(
analysis.GetValueDefinedAt(param1).positions(),
UnorderedElementsAre(HloPosition{param1, {}}, HloPosition{tuple, {1}},
HloPosition{gte1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(tuple).positions(),
UnorderedElementsAre(HloPosition{tuple, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(param0).GetUses(),
UnorderedElementsAre(HloUse{add, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(param1).GetUses(),
UnorderedElementsAre(HloUse{add, 1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(tuple, {}).GetUses(),
UnorderedElementsAre(HloUse{gte0, 0, {}}, HloUse{gte1, 0, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
TEST_P(HloDataflowAnalysisTest, NestedTuple) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto nested_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({tuple, tuple, constant1}));
auto gte_tuple = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple->shape(), nested_tuple, 1));
auto gte_out = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, gte_tuple, 0));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 4);
EXPECT_THAT(
analysis.GetValueDefinedAt(constant1).positions(),
UnorderedElementsAre(
HloPosition{constant1, {}}, HloPosition{tuple, {0}},
HloPosition{nested_tuple, {0, 0}}, HloPosition{nested_tuple, {1, 0}},
HloPosition{nested_tuple, {2}}, HloPosition{gte_tuple, {0}},
HloPosition{gte_out, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1, {}).GetUses(),
UnorderedElementsAre(HloUse{gte_out, 0, {0}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).GetUses().empty());
EXPECT_THAT(analysis.GetValueDefinedAt(tuple, {}).GetUses(),
UnorderedElementsAre(HloUse{gte_out, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(nested_tuple, {}).GetUses(),
UnorderedElementsAre(HloUse{gte_tuple, 0, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
EXPECT_FALSE(
analysis.GetValueDefinedAt(tuple, {}).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(nested_tuple, {})
.live_out_of_module());
}
TEST_P(HloDataflowAnalysisTest, SingleCall) {
auto subbuilder = HloComputation::Builder("Subcomputation");
auto subparam0 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto subparam1 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
HloComputation* called_computation =
module_->AddEmbeddedComputation(subbuilder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
EXPECT_FALSE(analysis.ValueIsDefinedAt(subparam0));
EXPECT_FALSE(analysis.ValueIsDefinedAt(subparam1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
EXPECT_FALSE(analysis.ValueIsDefinedAt(call));
EXPECT_EQ(analysis.GetUniqueValueAt(subparam0),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(subparam1),
analysis.GetValueDefinedAt(constant2));
EXPECT_EQ(analysis.GetUniqueValueAt(call), analysis.GetValueDefinedAt(add));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{add, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).GetUses(),
UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{add, 1, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
TEST_P(HloDataflowAnalysisTest, NestedCalls) {
auto inner_builder = HloComputation::Builder("InnerComputation");
auto inner_param0 = inner_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto inner_param1 = inner_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, inner_param0, inner_param1));
HloComputation* inner_computation =
module_->AddEmbeddedComputation(inner_builder.Build());
auto outer_builder = HloComputation::Builder("OuterComputation");
auto outer_param0 = outer_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto outer_param1 = outer_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto nested_call = outer_builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {outer_param1, outer_param0}, inner_computation));
HloComputation* outer_computation =
module_->AddEmbeddedComputation(outer_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, outer_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_THAT(
analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{nested_call, 1, {}},
HloUse{add, 1, {}}));
EXPECT_THAT(
analysis.GetValueDefinedAt(constant2).GetUses(),
UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{nested_call, 0, {}},
HloUse{add, 0, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
TEST_P(HloDataflowAnalysisTest, SingleWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
auto body_root = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_FALSE(analysis.GetValueDefinedAt(cond_constant).live_out_of_module());
if (ssa_form) {
EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {1}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(body_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(body_param, {1}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(cond_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(cond_param, {1}).is_phi());
EXPECT_THAT(
analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{add, 0, {}}, HloUse{body_root, 0, {}},
HloUse{xla_while, 0, {0}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {1})
.live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(add).live_out_of_module());
} else {
EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
}
TEST_P(HloDataflowAnalysisTest, SequentialWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while0 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
auto xla_while1 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while0));
auto xla_while2 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.GetUniqueValueAt(xla_while0, {0}),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(xla_while1, {0}),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(xla_while2, {0}),
analysis.GetValueDefinedAt(constant1));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
}
TEST_P(HloDataflowAnalysisTest, MultiLevelNestedWhile) {
const Shape tuple_shape = ShapeUtil::MakeTupleShape({scalar_shape_});
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto level0_builder = HloComputation::Builder("level0_body");
auto level0_param = level0_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto level0_element_0 = level0_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, level0_param, 0));
auto level0_root = level0_builder.AddInstruction(
HloInstruction::CreateTuple({level0_element_0}));
HloComputation* level0_body =
module_->AddEmbeddedComputation(level0_builder.Build());
auto level1_builder = HloComputation::Builder("level1_body");
auto level1_param = level1_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto level1_root = level1_builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, condition, level0_body, level1_param));
HloComputation* level1_body =
module_->AddEmbeddedComputation(level1_builder.Build());
auto level2_builder = HloComputation::Builder("level2_body");
auto level2_param = level2_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto level2_while = level2_builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, condition, level1_body, level2_param));
auto level2_element_0 = level2_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, level2_while, 0));
auto negate = level2_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, level2_element_0));
level2_builder.AddInstruction(HloInstruction::CreateTuple({negate}));
HloComputation* level2_body =
module_->AddEmbeddedComputation(level2_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({constant1}));
builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, level2_body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
if (!ssa_form) {
return;
}
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_FALSE(analysis.ValueIsDefinedAt(level1_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(level0_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(level1_root, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(level0_root, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level1_param, {0}),
HloValuesAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level0_param, {0}),
HloValuesAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level1_root, {0}),
HloValuesAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level0_root, {0}),
HloValuesAt(level2_param, {0}));
}
TEST_P(HloDataflowAnalysisTest, NestedWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto inner_builder = HloComputation::Builder("inner_body");
auto inner_param = inner_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto inner_element_0 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 0));
auto inner_element_1 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 1));
auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, inner_element_0, inner_element_1));
inner_builder.AddInstruction(
HloInstruction::CreateTuple({inner_element_0, add}));
HloComputation* inner_body =
module_->AddEmbeddedComputation(inner_builder.Build());
auto outer_builder = HloComputation::Builder("outer_body");
auto outer_param = outer_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto outer_element_0 = outer_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 0));
auto negate = outer_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, outer_element_0));
auto outer_element_1 = outer_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 1));
auto outer_tuple = outer_builder.AddInstruction(
HloInstruction::CreateTuple({negate, outer_element_1}));
auto nested_while = outer_builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, condition, inner_body, outer_tuple));
HloComputation* outer_body =
module_->AddEmbeddedComputation(outer_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({ |
1,883 | cpp | tensorflow/tensorflow | tuple_simplifier | third_party/xla/xla/service/tuple_simplifier.cc | third_party/xla/xla/service/tuple_simplifier_test.cc | #ifndef XLA_SERVICE_TUPLE_SIMPLIFIER_H_
#define XLA_SERVICE_TUPLE_SIMPLIFIER_H_
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
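// A pass that collapses tuple/get-tuple-element round trips: a Tuple built
// entirely from GetTupleElements of one compatible source tuple is replaced
// by that source, and chains of GetTupleElements through nested Tuples are
// forwarded to the original operand.
//
// Example usage (illustrative sketch; `module` is assumed to be a pointer to
// an already-built, verified HloModule):
//
//   TupleSimplifier simplifier;
//   TF_ASSIGN_OR_RETURN(bool changed, simplifier.Run(module));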
class TupleSimplifier : public HloModulePass {
public:
TupleSimplifier() : TupleSimplifier(false) {}
explicit TupleSimplifier(bool exclude_entry_computation);
~TupleSimplifier() override {}
absl::string_view name() const override { return "tuple-simplifier"; }
using HloPassInterface::Run;
using HloPassInterface::RunOnModuleGroup;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
bool exclude_entry_computation_;
absl::StatusOr<bool> RemoveWholeTuple(HloInstruction* tuple);
};
}
#endif
#include "xla/service/tuple_simplifier.h"
#include <queue>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
TupleSimplifier::TupleSimplifier(bool exclude_entry_computation)
: exclude_entry_computation_(exclude_entry_computation) {}
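// Detects a Tuple whose i-th operand is get-tuple-element(t, i) for a single
// source tuple `t` of compatible shape, and replaces the Tuple with `t`.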
absl::StatusOr<bool> TupleSimplifier::RemoveWholeTuple(HloInstruction* tuple) {
HloInstruction* top_tuple = nullptr;
for (int64_t operand_number = 0; operand_number < tuple->operand_count();
++operand_number) {
HloInstruction* operand = tuple->mutable_operand(operand_number);
if (operand->opcode() != HloOpcode::kGetTupleElement ||
operand->tuple_index() != operand_number) {
return false;
}
if (top_tuple == nullptr) {
top_tuple = operand->mutable_operand(0);
if (!ShapeUtil::Compatible(top_tuple->shape(), tuple->shape())) {
return false;
}
} else if (top_tuple != operand->operand(0)) {
return false;
}
}
if (top_tuple == nullptr) {
return false;
}
  TF_ASSIGN_OR_RETURN(bool changed,
                      tuple->parent()->ReplaceInstruction(
                          tuple, top_tuple, /*preserve_sharding=*/true));
return changed;
}
absl::StatusOr<bool> TupleSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* computation : module->computations(execution_threads)) {
if (exclude_entry_computation_ &&
computation == module->entry_computation()) {
continue;
}
for (auto* instruction : computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kTuple) {
TF_ASSIGN_OR_RETURN(bool c, RemoveWholeTuple(instruction));
changed |= c;
} else {
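        // Otherwise walk up the chain of get-tuple-elements to the latest
        // non-GTE ancestor; if that ancestor is a (possibly nested) Tuple,
        // forward the operand selected by the accumulated index so the GTE
        // chain is bypassed.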
auto ancestor = instruction->LatestNonGteAncestorAndIndex();
if (ancestor.first == instruction) {
continue;
}
HloInstruction* replacement = ancestor.first;
for (int i = 0; i < ancestor.second.size(); ++i) {
if (replacement->opcode() != HloOpcode::kTuple) {
replacement = nullptr;
break;
}
replacement = replacement->mutable_operand(ancestor.second[i]);
}
if (replacement) {
          TF_ASSIGN_OR_RETURN(bool replaced,
                              computation->ReplaceInstruction(
                                  instruction, replacement,
                                  /*preserve_sharding=*/true,
                                  /*relay_control_dependency=*/true));
changed |= replaced;
}
}
}
}
return changed;
}
} | #include "xla/service/tuple_simplifier.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class TupleSimplifierTest : public HloTestBase {
protected:
void Run(HloModule* module, bool change_expected) {
auto changed_status = RunHloPass(TupleSimplifier(), module);
TF_ASSERT_OK(changed_status.status());
EXPECT_EQ(change_expected, changed_status.value());
}
void Run(HloModule* module, bool change_expected, bool exclude_entry) {
auto changed_status = RunHloPass(TupleSimplifier(exclude_entry), module);
TF_ASSERT_OK(changed_status.status());
EXPECT_EQ(change_expected, changed_status.value());
}
const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
const Shape tuple_shape_ = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeShape(F32, {})});
};
TEST_F(TupleSimplifierTest, TupleOfParameters) {
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape_, "param2"));
builder.AddInstruction(HloInstruction::CreateTuple({param0, param1, param2}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
Run(module.get(), false);
}
TEST_F(TupleSimplifierTest, GteOfTupleOfParameter) {
HloComputation::Builder builder(TestName());
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "param"));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
Run(module.get(), false);
}
TEST_F(TupleSimplifierTest, GteOfTuple) {
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape_, "param2"));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({param0, param1, param2}));
HloInstruction* gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(), gte);
Run(module.get(), true);
EXPECT_THAT(computation->root_instruction(), param1);
}
TEST_F(TupleSimplifierTest, GteOfTupleChain) {
HloComputation::Builder builder(TestName());
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
const int kChainLength = 10;
HloInstruction* element = param;
for (int i = 0; i < kChainLength; ++i) {
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({element, element, element}));
element = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
}
builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, element));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(),
op::Negate(op::GetTupleElement(op::Tuple())));
Run(module.get(), true);
EXPECT_THAT(computation->root_instruction(), op::Negate(op::Parameter()));
}
TEST_F(TupleSimplifierTest, NestedGteOfTuples) {
HloComputation::Builder builder(TestName());
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
const int kNestingDepth = 5;
HloInstruction* nested_tuple = param;
for (int i = 0; i < kNestingDepth; ++i) {
nested_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({nested_tuple, nested_tuple}));
}
HloInstruction* element = nested_tuple;
for (int i = 0; i < kNestingDepth; ++i) {
element = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(element->shape(), 0), element, 0));
}
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(), element);
Run(module.get(), true);
EXPECT_THAT(computation->root_instruction(), param);
}
TEST_F(TupleSimplifierTest, TupleOfGteInstructions) {
HloComputation::Builder builder(TestName());
HloInstruction* tuple_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "param"));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 1));
HloInstruction* gte2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 2));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1, gte2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(), tuple);
Run(module.get(), true);
EXPECT_THAT(computation->root_instruction(), tuple_param);
}
TEST_F(TupleSimplifierTest, IncompatibleTuples) {
HloComputation::Builder builder(TestName());
HloInstruction* tuple_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "param"));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(), tuple);
Run(module.get(), false);
EXPECT_THAT(computation->root_instruction(), tuple);
}
TEST_F(TupleSimplifierTest, CanExcludeEntryComputation) {
auto module = CreateNewVerifiedModule();
HloInstruction* p0;
HloInstruction* p1;
HloComputation* c0;
HloComputation* c1;
HloComputation* entry;
{
HloComputation::Builder builder(TestName() + "_1");
p0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "param"));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, p0, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, p0, 1));
HloInstruction* gte2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, p0, 2));
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1, gte2}));
c0 = module->AddEmbeddedComputation(builder.Build());
}
{
HloComputation::Builder builder(TestName() + "_2");
p1 = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "param"));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, p1, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, p1, 1));
HloInstruction* gte2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, p1, 2));
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1, gte2}));
c1 = module->AddEmbeddedComputation(builder.Build());
}
{
HloComputation::Builder builder(TestName() + "_Entry");
HloInstruction* tuple_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "param"));
HloInstruction* call0 = builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape_, {tuple_param}, c0));
HloInstruction* call1 = builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape_, {tuple_param}, c1));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, call0, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, call1, 1));
HloInstruction* tuple0 =
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
HloInstruction* gte2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple0, 0));
HloInstruction* gte3 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple0, 1));
builder.AddInstruction(HloInstruction::CreateTuple({gte2, gte3}));
entry = module->AddEntryComputation(builder.Build());
}
Run(module.get(), true, true);
EXPECT_THAT(c0->root_instruction(), p0);
EXPECT_THAT(c1->root_instruction(), p1);
EXPECT_THAT(entry->instruction_count(), 9);
}
TEST_F(TupleSimplifierTest, ShardingLoss) {
const char* kModuleStr = R"(
HloModule m
ENTRY test {
p0 = s32[10] parameter(0), sharding={devices=[2]0,1}
t = (s32[10]) tuple(p0)
ROOT %gte = s32[10] get-tuple-element(t), index=0, sharding={replicated}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
Run(m.get(), false);
}
TEST_F(TupleSimplifierTest, NestedTuple) {
const char* kModuleStr = R"(
HloModule m
ENTRY test {
p0 = s32[10] parameter(0), sharding={devices=[2]0,1}
p1 = s32[10] parameter(1), sharding={devices=[2]0,1}
p2 = s32[10] parameter(2), sharding={devices=[2]0,1}
p3 = s32[10] parameter(3), sharding={devices=[2]0,1}
t = (s32[10], s32[10]) tuple(p0, p1), sharding={{devices=[2]0,1}, {devices=[2]0,1}}
t2 = ((s32[10], s32[10]), s32[10]) tuple(t, p2), sharding={{devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}}
t3 = (((s32[10], s32[10]), s32[10]), s32[10]) tuple(t2, p3), sharding={{devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}}
gte0 = ((s32[10], s32[10]), s32[10]) get-tuple-element(t3), index=0, sharding={{replicated}, {replicated}, {replicated}}
gte1 = (s32[10], s32[10]) get-tuple-element(gte0), index=0, sharding={{replicated}, {replicated}}
gte2 = s32[10] get-tuple-element(gte1), index=1, sharding={devices=[2]0,1}
gte3 = s32[10] get-tuple-element(gte1), index=0, sharding={replicated}
ROOT to = (s32[10], s32[10]) tuple(gte2, gte3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
Run(m.get(), true);
auto* p1 = FindInstruction(m.get(), "p1");
auto* gte3 = FindInstruction(m.get(), "gte3");
EXPECT_THAT(m->entry_computation()->root_instruction()->operand(0), p1);
EXPECT_THAT(m->entry_computation()->root_instruction()->operand(1), gte3);
}
}
} |
1,884 | cpp | tensorflow/tensorflow | reshape_decomposer | third_party/xla/xla/service/reshape_decomposer.cc | third_party/xla/xla/service/reshape_decomposer_test.cc | #ifndef XLA_SERVICE_RESHAPE_DECOMPOSER_H_
#define XLA_SERVICE_RESHAPE_DECOMPOSER_H_
#include "xla/service/hlo_pass_interface.h"
namespace xla {
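// A pass that rewrites reshapes which are not layout-preserving bitcasts into
// a bitcast plus one or two physical copies (transposes), so that every
// remaining kReshape is a bitcast of its operand.
//
// Example usage (illustrative sketch; `module` is assumed to be a pointer to
// an already-built, verified HloModule):
//
//   ReshapeDecomposer decomposer;
//   TF_ASSIGN_OR_RETURN(bool changed, decomposer.Run(module));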
class ReshapeDecomposer : public HloModulePass {
public:
absl::string_view name() const override { return "reshape-decomposer"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/reshape_decomposer.h"
#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/service/hlo_creation_utils.h"
namespace xla {
namespace {
class ReshapeDecomposerVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleReshape(HloInstruction* reshape) override {
HloInstruction* operand = reshape->mutable_operand(0);
auto s = reshape->shape();
auto s0 = operand->shape();
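    // Three cases: (1) the reshape is already a bitcast; (2) one side's
    // layout can be aligned to the other, so a single copy (physical
    // transpose) plus a bitcast suffices; (3) neither side aligns, so copy
    // both the operand and the result through descending layouts around a
    // bitcast.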
if (ShapeUtil::ReshapeIsBitcast(s, s0)) {
auto b = MakeBitcastHlo(operand, s, &operand->metadata());
return ReplaceInstruction(reshape, b);
} else if (auto output_aligned_input_shape =
ShapeUtil::AlignLayouts(s, s0)) {
Shape new_input_shape = *output_aligned_input_shape;
HloInstruction* copied_operand = MakeCopyHlo(operand, new_input_shape);
VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical "
"transpose on the operand: "
<< copied_operand->ToString();
auto b = MakeBitcastHlo(copied_operand, s, &copied_operand->metadata());
TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, b));
DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
} else if (auto input_aligned_output_shape =
ShapeUtil::AlignLayouts(s0, s)) {
Shape new_output_shape = *input_aligned_output_shape;
auto b = MakeBitcastHlo(operand, new_output_shape, &operand->metadata());
DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
HloInstruction* copied_result = MakeCopyHlo(b, s);
VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical "
"transposition on the result: "
<< copied_result->ToString();
TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, copied_result));
} else {
VLOG(3) << "Both input and output of reshape are not alignable, create "
"two physical transposes";
auto s0_normalized = ShapeUtil::MakeShapeWithDescendingLayout(
s0.element_type(), s0.dimensions());
auto c1 = MakeCopyHlo(reshape->mutable_operand(0), s0_normalized);
auto s_normalized = ShapeUtil::MakeShapeWithDescendingLayout(
s.element_type(), s.dimensions());
auto b = MakeBitcastHlo(c1, s_normalized, &c1->metadata());
DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
auto c2 = MakeCopyHlo(b, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, c2));
}
return absl::OkStatus();
}
};
}
absl::StatusOr<bool> ReshapeDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return ReshapeDecomposerVisitor{}.RunOnModule(module, execution_threads);
}
} | #include "xla/service/reshape_decomposer.h"
#include <memory>
#include <optional>
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ReshapeDecomposerTest : public HloTestBase {
public:
void CheckReshapeDecomposer(const char* hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo, ReshapeDecomposer{}, expected,
[&](HloModule* module) {
EXPECT_TRUE(absl::c_all_of(
module->entry_computation()->instructions(),
[&](const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kReshape ||
ShapeUtil::ReshapeIsBitcast(instr->operand(0)->shape(),
instr->shape());
}));
});
}
};
TEST_F(ReshapeDecomposerTest, IsBitcast) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[8]{0} parameter(0)
ROOT r = f32[4,2]{1,0} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
TEST_F(ReshapeDecomposerTest, AlignableOutput) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[8,3]{1,0} parameter(0)
ROOT r = f32[4,2,3]{0,1,2} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
TEST_F(ReshapeDecomposerTest, AlignableInput) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2,3]{0,1,2} parameter(0)
ROOT r = f32[8,3]{1,0} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
TEST_F(ReshapeDecomposerTest, NotAlignable) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2,3,8]{0,2,1,3} parameter(0)
ROOT r = f32[8,3,2,4]{0,2,1,3} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
}
} |
1,885 | cpp | tensorflow/tensorflow | hlo_module_dce | third_party/xla/xla/service/hlo_module_dce.cc | third_party/xla/xla/service/hlo_module_dce_test.cc | #ifndef XLA_SERVICE_HLO_MODULE_DCE_H_
#define XLA_SERVICE_HLO_MODULE_DCE_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
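// A module-level DCE pass: it removes dead tuple elements flowing through
// while loops (using liveness analysis) and then runs WhileLoopSimplifier,
// TupleSimplifier, and HloDCE to clean up the result.
//
// Example usage (illustrative sketch; `module` is assumed to be a pointer to
// an already-built, verified HloModule):
//
//   HloModuleDCE module_dce;
//   TF_ASSIGN_OR_RETURN(bool changed, module_dce.Run(module));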
class HloModuleDCE : public HloModulePass {
public:
~HloModuleDCE() override {}
absl::string_view name() const override { return "hlo-module-dce"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/hlo_module_dce.h"
#include <deque>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_liveness_analysis.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
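// For every while loop whose shape is a tuple and whose body root is a Tuple,
// replaces the producer of each dead tuple element (per the liveness
// analysis) with a pass-through get-tuple-element of the body parameter, then
// runs DCE on the modified while bodies.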
absl::StatusOr<bool> RunWhileDCE(
HloModule* module, HloLivenessAnalysis* liveness,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloComputation*> while_body_comps_to_dce;
for (auto* computation : module->computations(execution_threads)) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kWhile) {
continue;
}
const auto* xla_while = instruction;
auto* while_body_comp = xla_while->while_body();
auto* while_body_param = while_body_comp->parameter_instruction(0);
auto* while_body_root = while_body_comp->root_instruction();
if (!xla_while->shape().IsTuple() ||
while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(1) << "WhileDCE SKIP while: " << xla_while->ToString();
continue;
}
const int64_t tuple_element_count =
ShapeUtil::TupleElementCount(xla_while->shape());
bool modified_while_body_comp = false;
for (int64_t i = 0; i < tuple_element_count; ++i) {
if (liveness->IsLive(xla_while, {i})) {
continue;
}
VLOG(1) << "WhileDCE Dead while tuple element."
<< " while: " << xla_while->name() << " tuple_index: " << i;
HloInstruction* pass_thru_gte = while_body_comp->AddInstruction(
HloInstruction::CreateGetTupleElement(
while_body_param->shape().tuple_shapes(i), while_body_param,
i));
TF_RETURN_IF_ERROR(
while_body_root->ReplaceOperandWith(i, pass_thru_gte));
changed = true;
modified_while_body_comp = true;
}
if (modified_while_body_comp) {
while_body_comps_to_dce.push_back(while_body_comp);
}
}
}
for (auto* while_body_comp : while_body_comps_to_dce) {
    TF_ASSIGN_OR_RETURN(bool changed_for_computation,
                        HloDCE::RunOnComputation(
                            while_body_comp,
                            /*remove_cross_partition_collective_ops=*/false));
changed |= changed_for_computation;
}
return changed;
}
}
absl::StatusOr<bool> HloModuleDCE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Before HloModuleDCE:";
XLA_VLOG_LINES(3, module->ToString());
std::unique_ptr<HloLivenessAnalysis> liveness;
TF_ASSIGN_OR_RETURN(liveness, HloLivenessAnalysis::Run(*module));
TF_ASSIGN_OR_RETURN(bool hlo_module_dce_changed,
RunWhileDCE(module, liveness.get(), execution_threads));
WhileLoopSimplifier while_loop_simplifier;
TF_ASSIGN_OR_RETURN(bool while_loop_simplifier_changed,
while_loop_simplifier.Run(module, execution_threads));
TupleSimplifier tuple_simplifier;
TF_ASSIGN_OR_RETURN(bool tuple_simplifier_changed,
tuple_simplifier.Run(module, execution_threads));
HloDCE hlo_dce;
TF_ASSIGN_OR_RETURN(bool hlo_dce_changed,
hlo_dce.Run(module, execution_threads));
VLOG(2) << "After HloModuleDCE:";
XLA_VLOG_LINES(3, module->ToString());
return hlo_module_dce_changed | hlo_dce_changed | tuple_simplifier_changed |
while_loop_simplifier_changed;
}
} | #include "xla/service/hlo_module_dce.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class HloModuleDceTest : public HloTestBase {
protected:
HloModuleDceTest() {}
bool HasInstruction(const HloComputation& computation,
const HloInstruction* instruction) {
return absl::c_linear_search(computation.instructions(), instruction);
}
bool WhileBodyHasPassThroughTupleElement(const HloComputation* computation,
const std::string& while_name,
const int64_t tuple_index) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile &&
instruction->name() == while_name) {
auto* while_body_comp = instruction->while_body();
auto* while_body_param = while_body_comp->parameter_instruction(0);
auto* while_body_root = while_body_comp->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
return false;
}
auto* operand = while_body_root->operand(tuple_index);
if (operand->opcode() == HloOpcode::kGetTupleElement &&
operand->tuple_index() == tuple_index &&
operand->operand(0) == while_body_param) {
return true;
}
return false;
}
}
return false;
}
std::vector<const HloInstruction*> GetWhileLoops(
const HloComputation* computation) {
std::vector<const HloInstruction*> while_loops;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
while_loops.push_back(instruction);
}
}
return while_loops;
}
};
TEST_F(HloModuleDceTest, WhileWithLiveOutputs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, WhileWithUnusedSideEffectingTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], f32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = f32[] get-tuple-element(loop_var.1), index=1
constant.2 = f32[] constant(1.0)
rng = f32[] rng(constant.2, get-tuple-element.2), distribution=rng_uniform
add.1 = f32[] add(get-tuple-element.2, constant.2)
ROOT tuple = (s32[], f32[]) tuple(add, add.1)
}
SimpleLoop.condition {
loop_var.2 = (s32[], f32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.3 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.3), direction=LT
}
ENTRY SimpleLoop {
constant.4 = s32[] constant(0)
constant.5 = f32[] constant(0.0)
tuple.1 = (s32[], f32[]) tuple(constant.4, constant.5)
while = (s32[], f32[]) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, OneWhileWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(1, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
}
TEST_F(HloModuleDceTest, OneWhileWithTupleElementUsedByCond) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
multiply = s32[] multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[]) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=1
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.3, constant.4)
while = (s32[], s32[]) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body0 {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
multiply.1 = s32[3]{0} multiply(get-tuple-element.5, get-tuple-element.5)
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, multiply.1)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(10)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[], s32[3]{0}) tuple(constant.5, constant.6)
while.1 = (s32[], s32[3]{0}) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[] get-tuple-element(while.1), index=0
tuple.3 = (s32[], s32[3]{0}) tuple(get-tuple-element.7, constant.6)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(2, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadTupleElementSwizzled) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body0 {
loop_var.1 = (s32[3]{0}, s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=1
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=0
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[3]{0}, s32[]) tuple(multiply, add)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[3]{0}, s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=1
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
multiply.1 = s32[3]{0} multiply(get-tuple-element.5, get-tuple-element.5)
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, multiply.1)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(10)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[3]{0}, s32[]) tuple(constant.6, constant.5)
while.1 = (s32[3]{0}, s32[]) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[] get-tuple-element(while.1), index=1
tuple.3 = (s32[], s32[3]{0}) tuple(get-tuple-element.7, constant.6)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(2, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));
}
TEST_F(HloModuleDceTest, WhileWithOutfeed) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule OutfeedLoop
WhileBody {
body_param = (s32[]) parameter(0)
token0 = token[] after-all()
constant.2 = s32[] constant(2)
outfeed_tuple = (s32[]) outfeed(constant.2, token0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[]) tuple(add)
}
WhileCondition {
cond_param = (s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
tuple.1 = (s32[]) tuple(constant.3)
while = (s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT rtuple = () tuple()
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
}
TEST_F(HloModuleDceTest, WhileWithOnlyLoopVariableBumping) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule InfiniteLoop
WhileBody {
body_param = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
get-tuple-element.2 = s32[] get-tuple-element(body_param), index=1
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[], s32[]) tuple(add, get-tuple-element.2)
}
WhileCondition {
cond_param = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
p0 = (s32[]) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(p0), index=0
constant.3 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.3, get-tuple-element.5)
while = (s32[], s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=1
})")
.value();
HloModuleDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadWhileLoop) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TwoWhilesWithDeadWhileLoop
SimpleLoop.body0 {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
ROOT tuple = (s32[], s32[3]{0}) tuple(add, get-tuple-element.2)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, get-tuple-element.5)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(5)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[], s32[3]{0}) tuple(constant.5, constant.6)
while.1 = (s32[], s32[3]{0}) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[3]{0} get-tuple-element(while.1), index=1
constant.7 = s32[] constant(0)
tuple.3 = (s32[], s32[3]{0}) tuple(constant.7, get-tuple-element.7)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_TRUE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_TRUE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(1, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
}
}
} |
1,886 | cpp | tensorflow/tensorflow | loop_schedule_linearizer | third_party/xla/xla/service/loop_schedule_linearizer.cc | third_party/xla/xla/service/loop_schedule_linearizer_test.cc | #ifndef XLA_SERVICE_LOOP_SCHEDULE_LINEARIZER_H_
#define XLA_SERVICE_LOOP_SCHEDULE_LINEARIZER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
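// A pass that adds control dependencies inside while-loop bodies so that
// reads of a loop-carried buffer element are scheduled before the instruction
// that overwrites that element, which lets later copy insertion avoid extra
// copies. Loops whose bodies contain async collectives are skipped.
//
// Example usage (illustrative sketch; `module` is assumed to be a pointer to
// an already-built, verified HloModule; the buffer-sharing callback is
// optional):
//
//   LoopScheduleLinearizer linearizer;
//   TF_ASSIGN_OR_RETURN(bool changed, linearizer.Run(module));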
class LoopScheduleLinearizer : public HloModulePass {
public:
absl::string_view name() const override { return "loop-schedule-linearizer"; }
explicit LoopScheduleLinearizer(
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr)
: can_share_buffer_(can_share_buffer) {}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
HloDataflowAnalysis::CanShareBuffer can_share_buffer_;
};
}
#endif
#include "xla/service/loop_schedule_linearizer.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
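// A lightweight dependency graph over a computation's instructions (operand
// and control edges) that can test whether adding a new edge would introduce
// a cycle before the edge is committed as a control dependency.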
class ComputationInstructionOrdering {
public:
explicit ComputationInstructionOrdering(const HloComputation& computation) {
for (const HloInstruction* instr : computation.instructions()) {
for (const HloInstruction* control_pred : instr->control_predecessors()) {
CHECK(this->InsertEdge(*control_pred, *instr))
<< "Graph already contained a cycle";
}
for (int op_id = 0; op_id < instr->operand_count(); op_id++) {
const HloInstruction* op = instr->operand(op_id);
CHECK(this->InsertEdge(*op, *instr))
<< "Graph already contained a cycle";
}
}
}
int32_t NodeIdForInstruction(const HloInstruction& instr) {
int32_t instruction_id = instr.unique_id();
auto it = node_id_to_graph_id_.find(instruction_id);
if (it != node_id_to_graph_id_.end()) {
return it->second;
}
int32_t node_id = graph_cycles_.NewNode();
node_id_to_graph_id_[instruction_id] = node_id;
return node_id;
}
bool InsertEdge(const HloInstruction& source, const HloInstruction& dest) {
int32_t source_id = NodeIdForInstruction(source);
int32_t dest_id = NodeIdForInstruction(dest);
return graph_cycles_.InsertEdge(source_id, dest_id);
}
private:
absl::flat_hash_map<int32_t, int32_t> node_id_to_graph_id_;
tensorflow::GraphCycles graph_cycles_;
};
}
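// Adds control edges inside the while body so that each read of a
// loop-carried element (a use of the parameter's value at that index) runs
// before the instruction that writes the corresponding element of the body
// root, skipping indices with ambiguous values and edges that would create a
// cycle.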
static absl::StatusOr<bool> AddControlEdgesForLoopWrites(
HloInstruction* xla_while, HloAliasAnalysis& alias_analysis) {
HloDataflowAnalysis& dataflow = alias_analysis.dataflow_analysis();
HloComputation* body = xla_while->while_body();
HloInstruction* root = body->root_instruction();
HloInstruction* input = body->parameter_instruction(0);
bool changed = false;
ComputationInstructionOrdering ordering(*body);
ShapeTree<bool> indices_to_copy(&xla_while->shape());
for (auto& p : indices_to_copy) {
const ShapeIndex& index = p.first;
if (index.empty()) {
continue;
}
if (dataflow.GetValueSet(root, index).values().size() > 1 ||
dataflow.GetValueSet(input, index).values().size() > 1) {
VLOG(2) << "Index " << index.ToString() << " is associated with multiple "
<< "values, not attempting to introduce stricter dependencies";
} else {
HloValue& value_at_root = dataflow.GetUniqueValueAt(root, index);
HloValue& value_at_input = dataflow.GetUniqueValueAt(input, index);
if (value_at_root.shape().IsTuple()) {
continue;
}
HloInstruction* write = value_at_root.defining_instruction();
for (const HloUse& use : value_at_input.GetUses()) {
HloInstruction* read = use.instruction;
        if (read != write && value_at_root != value_at_input &&
            read->parent() == write->parent()) {
VLOG(2) << "Inside " << body->name() << ", index "
<< index.ToString();
if (!ordering.InsertEdge(*read, *write)) {
VLOG(2) << "Not adding a control dependency from "
<< read->ToShortString() << " to " << write->ToShortString()
<< " as it would introduce a cycle";
continue;
}
if (!absl::c_linear_search(read->control_successors(), write)) {
TF_RETURN_IF_ERROR(read->AddControlDependencyTo(write));
VLOG(2) << "Adding dependency: " << read->ToShortString()
<< " before " << write->ToShortString();
changed = true;
}
}
}
}
}
return changed;
}
absl::StatusOr<bool> LoopScheduleLinearizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<HloAliasAnalysis> alias_analysis;
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kWhile) {
continue;
}
const HloComputation* body = instruction->while_body();
bool has_async_collectives =
absl::c_any_of(body->instructions(), [](const HloInstruction* instr) {
            return hlo_query::IsAsyncCollectiveStartOp(
                       instr, /*include_send_recv=*/true) ||
                   hlo_query::IsAsyncCollectiveDoneOp(
                       instr, /*include_send_recv=*/true);
});
if (has_async_collectives) {
VLOG(2) << "Skipping " << instruction->name()
<< " since body has async collectives";
continue;
}
if (alias_analysis == nullptr) {
TF_ASSIGN_OR_RETURN(alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer_));
}
TF_ASSIGN_OR_RETURN(bool updated_loop, AddControlEdgesForLoopWrites(
instruction, *alias_analysis));
changed |= updated_loop;
}
}
return changed;
}
} | #include "xla/service/loop_schedule_linearizer.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/copy_insertion.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
int64_t CountControlEdges(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
count += instruction->control_successors().size();
}
return count;
}
int64_t CountControlEdges(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountControlEdges(*computation);
}
return count;
}
class LoopScheduleLinearizerTest : public HloTestBase {
protected:
void InsertCopies(HloModule* module, bool expect_change) {
LoopScheduleLinearizer loop_schedule_linearizer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, loop_schedule_linearizer.Run(module));
ASSERT_EQ(changed, expect_change);
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(module).status());
}
};
TEST_F(LoopScheduleLinearizerTest, NoExtraCopiesRequired) {
absl::string_view hlo_string = R"(
HloModule module
while_body {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
buffer = s32[] get-tuple-element(input), index=1
one = s32[] constant(1)
updated_counter = s32[] add(counter, one)
updated_buffer = s32[] add(buffer, counter)
ROOT out = (s32[], s32[]) tuple(updated_counter, updated_buffer)
}
while_cond {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
bound = s32[] constant(100)
ROOT cmp = pred[] compare(counter, bound), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
buffer = s32[] parameter(0)
while_input = (s32[], s32[]) tuple(zero, buffer)
ROOT out = (s32[], s32[]) while(while_input), condition=while_cond, body=while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get(), true);
EXPECT_EQ(CountCopies(
*module->entry_computation()->root_instruction()->while_body()),
0);
EXPECT_EQ(CountControlEdges(
*module->entry_computation()->root_instruction()->while_body()),
1);
}
TEST_F(LoopScheduleLinearizerTest, SkipAsyncCollectives) {
absl::string_view hlo_string = R"(
HloModule module
add {
x = s32[] parameter(0)
y = s32[] parameter(1)
ROOT add = s32[] add(x, y)
}
while_body {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
buffer = s32[] get-tuple-element(input), index=1
one = s32[] constant(1)
updated_counter = s32[] add(counter, one)
updated_buffer = s32[] add(buffer, counter)
ar_start = s32[] all-reduce-start(updated_buffer), replica_groups={}, to_apply=add
ar_done = s32[] all-reduce-done(ar_start)
ROOT out = (s32[], s32[]) tuple(updated_counter, ar_done)
}
while_cond {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
bound = s32[] constant(100)
ROOT cmp = pred[] compare(counter, bound), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
buffer = s32[] parameter(0)
while_input = (s32[], s32[]) tuple(zero, buffer)
ROOT out = (s32[], s32[]) while(while_input), condition=while_cond, body=while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get(), false);
}
}
} |
1,887 | cpp | tensorflow/tensorflow | shaped_buffer | third_party/xla/xla/service/shaped_buffer.cc | third_party/xla/xla/service/shaped_buffer_test.cc | #ifndef XLA_SERVICE_SHAPED_BUFFER_H_
#define XLA_SERVICE_SHAPED_BUFFER_H_
#include <memory>
#include <ostream>
#include <string>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/xla_data.pb.h"
namespace xla {
class ScopedShapedBuffer;
class ShapedBuffer {
public:
ShapedBuffer(Shape on_device_shape, int device_ordinal);
ShapedBuffer(Shape on_host_shape, Shape on_device_shape, int device_ordinal);
ShapedBuffer(ShapedBuffer&& s);
ShapedBuffer& operator=(ShapedBuffer&&);
ShapedBuffer(const ShapedBuffer&) = delete;
ShapedBuffer& operator=(const ShapedBuffer&) = delete;
ShapedBuffer(const ScopedShapedBuffer&) = delete;
ShapedBuffer& operator=(const ScopedShapedBuffer&) = delete;
virtual ~ShapedBuffer();
const Shape& on_host_shape() const { return on_host_shape_; }
const Shape& on_device_shape() const { return on_device_shape_; }
int device_ordinal() const { return device_ordinal_; }
const se::DeviceMemoryBase& root_buffer() const {
return buffer({});
}
const se::DeviceMemoryBase& buffer(const ShapeIndex& index) const {
return buffers_.element(index);
}
void set_buffer(const se::DeviceMemoryBase& buffer, const ShapeIndex& index) {
*buffers_.mutable_element(index) = buffer;
}
void set_buffers(ShapeTree<se::DeviceMemoryBase> buffers) {
CHECK(ShapeUtil::Equal(buffers.shape(), on_device_shape_));
buffers_ = std::move(buffers);
buffers_.replace_shape_ptr(on_device_shape_);
}
void set_shapes(const Shape& on_device_shape) {
CHECK(ShapeUtil::EqualStructure(on_device_shape, on_device_shape_))
<< "Structures are not the same. new: " << on_device_shape
<< ", old: " << on_device_shape_;
on_host_shape_ = ShapeUtil::DeviceShapeToHostShape(on_device_shape);
on_device_shape_ = on_device_shape;
buffers_.replace_shape_ptr(on_device_shape_);
}
void set_shapes(const Shape& on_host_shape, const Shape& on_device_shape) {
set_shapes(on_device_shape);
}
const ShapeTree<se::DeviceMemoryBase>& buffers() const { return buffers_; }
ShapeTree<se::DeviceMemoryBase>& buffers() { return buffers_; }
absl::StatusOr<ShapedBuffer> SubShapedBuffer(const ShapeIndex& index) const;
void clear();
std::string ToString() const;
protected:
Shape on_host_shape_;
Shape on_device_shape_;
int device_ordinal_;
ShapeTree<se::DeviceMemoryBase> buffers_;
};
std::ostream& operator<<(std::ostream& out, const ShapedBuffer& buffer);
class ScopedShapedBuffer : public ShapedBuffer {
public:
explicit ScopedShapedBuffer(Shape on_device_shape,
se::DeviceMemoryAllocator* allocator,
int device_ordinal);
explicit ScopedShapedBuffer(Shape on_host_shape, Shape on_device_shape,
se::DeviceMemoryAllocator* allocator,
int device_ordinal);
explicit ScopedShapedBuffer(ShapedBuffer shaped_buffer,
se::DeviceMemoryAllocator* allocator);
ScopedShapedBuffer(ScopedShapedBuffer&& s);
ScopedShapedBuffer& operator=(ScopedShapedBuffer&&);
ScopedShapedBuffer(const ScopedShapedBuffer&) = delete;
ScopedShapedBuffer& operator=(const ScopedShapedBuffer&) = delete;
~ScopedShapedBuffer() override;
se::DeviceMemoryAllocator* memory_allocator() const { return allocator_; }
void set_buffer(se::OwningDeviceMemory buffer, const ShapeIndex& index) {
if (!buffer.is_null()) {
CHECK_EQ(buffer.device_ordinal(), device_ordinal());
CHECK_EQ(buffer.allocator(), allocator_);
*buffers_.mutable_element(index) = buffer.Release();
} else {
*buffers_.mutable_element(index) = se::DeviceMemoryBase();
}
}
[[nodiscard]] ShapedBuffer release();
ScopedShapedBuffer TakeSubTree(ShapeIndexView index);
protected:
void Deallocate();
se::DeviceMemoryAllocator* allocator_;
};
}
#endif
#include "xla/service/shaped_buffer.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "xla/layout_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
ShapedBuffer::ShapedBuffer(Shape on_device_shape, int device_ordinal)
: on_device_shape_(std::move(on_device_shape)),
device_ordinal_(device_ordinal),
buffers_(&on_device_shape_) {
on_host_shape_ = ShapeUtil::DeviceShapeToHostShape(on_device_shape_);
}
ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape,
int device_ordinal)
    : ShapedBuffer(std::move(on_device_shape), device_ordinal) {}
ShapedBuffer::ShapedBuffer(ShapedBuffer&& s)
: on_host_shape_(std::move(s.on_host_shape_)),
on_device_shape_(std::move(s.on_device_shape_)),
device_ordinal_(s.device_ordinal_),
buffers_(std::move(s.buffers_)) {
buffers_.replace_shape_ptr(on_device_shape_);
}
ShapedBuffer& ShapedBuffer::operator=(ShapedBuffer&& s) {
on_device_shape_ = std::move(s.on_device_shape_);
on_host_shape_ = std::move(s.on_host_shape_);
device_ordinal_ = s.device_ordinal_;
buffers_ = std::move(s.buffers_);
buffers_.replace_shape_ptr(on_device_shape_);
return *this;
}
ShapedBuffer::~ShapedBuffer() {}
absl::StatusOr<ShapedBuffer> ShapedBuffer::SubShapedBuffer(
const ShapeIndex& index) const {
TF_ASSIGN_OR_RETURN(const Shape* device_sub_shape,
ShapeUtil::TryGetSubshape(on_device_shape(), index));
ShapedBuffer sub_shaped_buffer(*device_sub_shape, device_ordinal_);
TF_ASSIGN_OR_RETURN(ShapeTree<se::DeviceMemoryBase> sub_buffers,
buffers_.SubShapeTree(index));
sub_shaped_buffer.set_buffers(std::move(sub_buffers));
return std::move(sub_shaped_buffer);
}
void ShapedBuffer::clear() {
for (auto& pair : buffers_) {
pair.second = se::DeviceMemoryBase();
}
}
std::string ShapedBuffer::ToString() const {
std::string s =
absl::StrCat("ShapedBuffer(", device_ordinal(),
"), on-device shape=" +
ShapeUtil::HumanStringWithLayout(on_device_shape()),
":\n");
ShapeUtil::ForEachSubshape(
on_device_shape(),
[this, &s](const Shape& subshape, const ShapeIndex& index) {
std::string shape_str;
if (subshape.IsTuple()) {
shape_str = "tuple";
} else {
shape_str = ShapeUtil::HumanStringWithLayout(subshape);
}
const se::DeviceMemoryBase& memory = buffer(index);
absl::StrAppendFormat(&s, " %s%p (%d bytes) : %s\n",
std::string(index.size() * 2, ' '),
memory.opaque(), memory.size(), shape_str);
});
return s;
}
std::ostream& operator<<(std::ostream& out, const ShapedBuffer& buffer) {
out << buffer.ToString();
return out;
}
ScopedShapedBuffer::ScopedShapedBuffer(Shape on_device_shape,
se::DeviceMemoryAllocator* allocator,
int device_ordinal)
: ShapedBuffer(std::move(on_device_shape), device_ordinal),
allocator_(allocator) {}
ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape,
Shape on_device_shape,
se::DeviceMemoryAllocator* allocator,
int device_ordinal)
: ScopedShapedBuffer(std::move(on_device_shape), allocator,
device_ordinal) {}
ScopedShapedBuffer::ScopedShapedBuffer(ShapedBuffer shaped_buffer,
se::DeviceMemoryAllocator* allocator)
: ShapedBuffer(std::move(shaped_buffer)), allocator_(allocator) {}
ScopedShapedBuffer::ScopedShapedBuffer(ScopedShapedBuffer&& s)
: ShapedBuffer(static_cast<ShapedBuffer&&>(s)), allocator_(s.allocator_) {
s.allocator_ = nullptr;
}
ScopedShapedBuffer& ScopedShapedBuffer::operator=(ScopedShapedBuffer&& s) {
Deallocate();
*static_cast<ShapedBuffer*>(this) = std::move(static_cast<ShapedBuffer&>(s));
allocator_ = s.allocator_;
s.allocator_ = nullptr;
return *this;
}
ScopedShapedBuffer::~ScopedShapedBuffer() { Deallocate(); }
ShapedBuffer ScopedShapedBuffer::release() {
ShapedBuffer shaped_buffer(static_cast<ShapedBuffer&&>(*this));
buffers_ = ShapeTree<se::DeviceMemoryBase>();
return shaped_buffer;
}
void ScopedShapedBuffer::Deallocate() {
if (allocator_ == nullptr) {
return;
}
absl::flat_hash_set<void*> deallocated_ptrs;
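  // Tuple elements may alias the same underlying allocation, so deallocate
  // each distinct opaque pointer exactly once.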
for (auto& pair : buffers_) {
se::DeviceMemoryBase& memory_base = pair.second;
if (!memory_base.is_null() &&
deallocated_ptrs.insert(memory_base.opaque()).second) {
TF_CHECK_OK(allocator_->Deallocate(device_ordinal(), memory_base));
}
}
}
ScopedShapedBuffer ScopedShapedBuffer::TakeSubTree(ShapeIndexView index) {
const xla::Shape& sub_on_device_shape =
xla::ShapeUtil::GetSubshape(on_device_shape(), {index});
ScopedShapedBuffer output(sub_on_device_shape, memory_allocator(),
device_ordinal());
auto src_it = buffers().find(index);
auto dst_it = output.buffers().begin();
while (dst_it != output.buffers().end()) {
dst_it->second = src_it->second;
src_it->second = tensorflow::se::DeviceMemoryBase(nullptr, 0);
++src_it;
++dst_it;
}
return output;
}
} | #include "xla/service/shaped_buffer.h"
#include <cstdlib>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
TEST(ShapedBufferTest, ScopedShapeBufferAsShapedBufferB71629047) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(auto executors,
xla::PlatformUtil::GetStreamExecutors(platform));
xla::se::StreamExecutorMemoryAllocator allocator(platform, executors);
const xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {});
const int kDeviceOrdinal = 0;
auto scoped_buffer = std::make_unique<xla::ScopedShapedBuffer>(
shape, shape, &allocator, kDeviceOrdinal);
std::unique_ptr<xla::ShapedBuffer> buffer = std::move(scoped_buffer);
buffer = nullptr;
}
class TestAllocator : public se::DeviceMemoryAllocator {
public:
TestAllocator()
: se::DeviceMemoryAllocator(PlatformUtil::GetDefaultPlatform().value()) {}
~TestAllocator() override {
if (!allocations_.empty()) {
ADD_FAILURE() << "Some allocations not freed!";
}
}
using se::DeviceMemoryAllocator::Allocate;
absl::StatusOr<se::OwningDeviceMemory> Allocate(
      int device_ordinal, uint64_t size, bool /*retry_on_failure*/,
      int64_t /*memory_space*/) override {
if (size == 0) {
return se::OwningDeviceMemory();
}
void* buf = malloc(size);
allocations_.insert({device_ordinal, buf});
return se::OwningDeviceMemory(se::DeviceMemoryBase(buf, size),
device_ordinal, this);
}
absl::Status Deallocate(int device_ordinal,
se::DeviceMemoryBase mem) override {
if (mem.is_null()) {
return absl::OkStatus();
}
auto it = allocations_.find({device_ordinal, mem.opaque()});
if (it == allocations_.end()) {
ADD_FAILURE() << "Allocation not found (double free?)";
} else {
free(mem.opaque());
allocations_.erase(it);
}
return absl::OkStatus();
}
bool AllowsAsynchronousDeallocation() const override { return false; }
absl::StatusOr<se::Stream*> GetStream(int device_ordinal) override {
LOG(FATAL) << "Not implemented";
}
private:
  std::set<std::pair<int64_t, void*>> allocations_;
};
TEST(ScopedShapedBufferTest, TestMoveAssignmentOperator) {
Shape s = ShapeUtil::MakeShape(F32, {1});
TestAllocator allocator;
ScopedShapedBuffer sb1(s, &allocator, 0);
sb1.set_buffer(allocator.Allocate(0, 42).value(),
{});
ScopedShapedBuffer sb2(s, &allocator, 1);
sb2.set_buffer(allocator.Allocate(1, 10).value(),
{});
sb1 = std::move(sb2);
}
TEST(ScopedShapedBufferTest, TestTakeSubTree) {
TestAllocator allocator;
Shape s = ShapeUtil::MakeShape(F32, {1});
s = xla::ShapeUtil::MakeTupleShape(std::vector<xla::Shape>(2, s));
s = xla::ShapeUtil::MakeTupleShape(std::vector<xla::Shape>(3, s));
ScopedShapedBuffer sb(s, &allocator, 0);
sb.buffers().ForEachMutableElement(
[&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) {
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory m,
allocator.Allocate(0, 77));
*buffer = m.Release();
});
ShapeTree<se::DeviceMemoryBase> buffers = sb.buffers();
xla::ShapeIndex subtree_index = {1};
ScopedShapedBuffer output = sb.TakeSubTree(subtree_index);
output.buffers().ForEachElement([&](const xla::ShapeIndex& sub_index,
const se::DeviceMemoryBase& buffer) {
xla::ShapeIndex orig_index = subtree_index;
for (int i : sub_index) {
orig_index.push_back(i);
}
EXPECT_TRUE(buffers.find(orig_index)->second.IsSameAs(buffer));
});
sb.buffers().ForEachElement([&](const xla::ShapeIndex& index,
const se::DeviceMemoryBase& buffer) {
if ((index.size() >= subtree_index.size()) &&
ShapeIndexView(index).first(subtree_index.size()) == subtree_index) {
EXPECT_TRUE(buffer.is_null());
} else {
EXPECT_TRUE(buffers.find(index)->second.IsSameAs(buffer));
}
});
}
TEST(ScopedShapedBufferTest, TestSubShapeTree) {
Shape array_shape = ShapeUtil::MakeShape(F32, {1});
Shape tuple_shape =
xla::ShapeUtil::MakeTupleShape({array_shape, array_shape});
TestAllocator allocator;
ScopedShapedBuffer sb(tuple_shape, &allocator, 0);
sb.buffers().ForEachMutableElement(
[&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) {
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory m,
allocator.Allocate(0, 32));
*buffer = m.Release();
});
auto ssb_statusor = sb.SubShapedBuffer({1});
ASSERT_TRUE(ssb_statusor.ok());
auto ssb = std::move(ssb_statusor).value();
EXPECT_EQ(ssb.on_host_shape(), array_shape);
EXPECT_EQ(ssb.on_device_shape(), array_shape);
}
void BM_TakeSubTree(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
TestAllocator allocator;
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = xla::ShapeUtil::MakeTupleShape(shapes);
}
xla::ScopedShapedBuffer shaped_buffer(shape, &allocator,
0);
for (auto s : state) {
(void)shaped_buffer.TakeSubTree({fan_out / 2}).release();
}
}
BENCHMARK(BM_TakeSubTree)
->ArgPair(1, 4)
->ArgPair(1, 8)
->ArgPair(1, 32)
->ArgPair(1, 64)
->ArgPair(1, 128)
->ArgPair(1, 256)
->ArgPair(1, 512)
->ArgPair(2, 4)
->ArgPair(2, 8)
->ArgPair(2, 32)
->ArgPair(2, 64)
->ArgPair(2, 128);
}
} |
1,888 | cpp | tensorflow/tensorflow | hlo_constant_folding | third_party/xla/xla/service/hlo_constant_folding.cc | third_party/xla/xla/service/hlo_constant_folding_test.cc | #ifndef XLA_SERVICE_HLO_CONSTANT_FOLDING_H_
#define XLA_SERVICE_HLO_CONSTANT_FOLDING_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class HloConstantFolding : public HloModulePass {
public:
absl::string_view name() const override { return "constant_folding"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
static std::atomic<int64_t> slow_op_counter_;
};
}
#endif
#include "xla/service/hlo_constant_folding.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "tsl/platform/errors.h"
namespace xla {
static bool IsOrContainsIllegalInstr(const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kAfterAll ||
instr->opcode() == HloOpcode::kRng) {
return true;
}
for (const HloComputation* c : instr->called_computations()) {
if (absl::c_any_of(c->instructions(), IsOrContainsIllegalInstr)) {
return true;
}
}
return false;
}
std::atomic<int64_t> HloConstantFolding::slow_op_counter_{0};
absl::StatusOr<bool> HloConstantFolding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto evaluator = std::make_unique<HloEvaluator>(0);
evaluator->set_use_fast_path(true);
std::vector<HloInstruction*> dead_instructions;
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (auto* instruction : computation->MakeInstructionPostOrder()) {
if (instruction->IsDead()) {
continue;
}
if (!absl::c_any_of(instruction->operands(),
HloPredicateIsOp<HloOpcode::kConstant>) ||
!absl::c_all_of(
instruction->operands(), [](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kConstant ||
(operand->opcode() == HloOpcode::kBroadcast &&
operand->operand(0)->opcode() == HloOpcode::kConstant);
})) {
continue;
}
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->opcode() == HloOpcode::kConstant ||
instruction->opcode() == HloOpcode::kTuple) {
continue;
}
if (instruction->opcode() == HloOpcode::kBroadcast ||
instruction->opcode() == HloOpcode::kIota) {
continue;
}
if (instruction->IsAsynchronous() &&
instruction->async_execution_thread() !=
instruction->parent()->execution_thread()) {
continue;
}
if (instruction->opcode() == HloOpcode::kFft) {
continue;
}
if (IsOrContainsIllegalInstr(instruction)) {
continue;
}
if (instruction->HasSideEffect()) {
continue;
}
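      // Bound the size of any constant we would materialize: folding very
      // large arrays costs more compile time and memory than it saves at
      // runtime.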
if (instruction->shape().IsArray()) {
int64_t elements_in_operands = 0;
for (HloInstruction* operand : instruction->operands()) {
if (operand->shape().IsArray()) {
elements_in_operands += ShapeUtil::ElementsIn(operand->shape());
}
}
int64_t elements_in_constant =
ShapeUtil::ElementsIn(instruction->shape());
static const int64_t kMaximumConstantSizeElements = 45 * 1000 * 1000;
if (std::max(elements_in_constant, elements_in_operands) >
kMaximumConstantSizeElements) {
continue;
}
}
VLOG(5) << "Constant folding: " << instruction->ToString();
absl::Duration slow_timeout =
absl::Seconds(uint64_t{1} << slow_op_counter_.load());
SlowOperationAlarm slow_alarm(slow_timeout, [instruction, slow_timeout] {
const bool ndebug =
#if NDEBUG
true;
#else
false;
#endif
absl::string_view explanation_msg =
ndebug
? "This isn't necessarily a bug; constant-folding is "
"inherently a trade-off between compilation time and speed "
"at runtime. XLA has some guards that attempt to keep "
"constant folding from taking too long, but fundamentally "
"you'll always be able to come up with an input program that "
"takes a long time.\n\n"
"If you'd like to file a bug, run with envvar "
"XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results."
: "XLA was built without compiler optimizations, which can be "
"slow. Try rebuilding with -c opt.";
return absl::StrFormat(
"Constant folding an instruction is taking > %s:\n\n"
" %s\n\n"
"%s",
absl::FormatDuration(slow_timeout), instruction->ToString(),
explanation_msg);
});
Literal result;
if (!evaluator->TryEvaluate(
instruction, &result,
true)) {
VLOG(2) << "Constant folding failed for instruction: "
<< instruction->ToString();
continue;
}
slow_alarm.cancel();
if (slow_alarm.fired()) {
slow_op_counter_++;
}
VLOG(4) << "Constant folded: " << instruction->ToString();
dead_instructions.push_back(instruction);
HloInstruction* new_constant = instruction->AddInstruction(
HloInstruction::CreateConstant(std::move(result)));
if (new_constant->shape().has_layout()) {
new_constant->mutable_shape()
->mutable_layout()
->set_element_size_in_bits(
instruction->shape().layout().element_size_in_bits());
}
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_constant));
}
}
const bool changed = !dead_instructions.empty();
for (HloInstruction* dead_instruction : dead_instructions) {
CHECK(dead_instruction->IsDead());
HloComputation* computation = dead_instruction->parent();
TF_RETURN_IF_ERROR(computation->RemoveInstruction(dead_instruction));
}
return changed;
}
} | #include "xla/service/hlo_constant_folding.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/permutation_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
namespace m = xla::match;
using HloConstantFoldingTest = HloTestBase;
TEST_F(HloConstantFoldingTest, ConvertF32ToS64) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {}), input));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
EXPECT_EQ(
computation->root_instruction()->literal().GetFirstElement<int64_t>(),
42);
}
TEST_F(HloConstantFoldingTest, ConvertS64ToF32) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64_t>(42)));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(),
42.0f);
}
TEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({42.0f, 19.0f})));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {2}), input));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
EXPECT_EQ(computation->root_instruction()->literal().Get<int64_t>({0}), 42);
EXPECT_EQ(computation->root_instruction()->literal().Get<int64_t>({1}), 19);
}
TEST_F(HloConstantFoldingTest, Concatenate) {
const struct TestConfig {
int concat_dimension;
std::vector<int64_t> dimensions;
std::vector<int64_t> concat_sizes;
} test_configs[] = {
{1, {11, 0, 7, 5, 9}, {2, 5, 7, 11}},
{3, {1, 4, 17, 0, 8}, {1, 3, 9, 12}},
};
for (auto& test_config : test_configs) {
HloComputation::Builder builder(TestName());
std::vector<int64_t> dimensions(test_config.dimensions.begin(),
test_config.dimensions.end());
int64_t concat_size = 0;
std::vector<HloInstruction*> operands;
for (auto csize : test_config.concat_sizes) {
dimensions[test_config.concat_dimension] = csize;
concat_size += csize;
auto literal = LiteralUtil::CreateFromDimensions(F32, dimensions);
HloInstruction* insn = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
operands.push_back(insn);
}
dimensions[test_config.concat_dimension] = concat_size;
Shape shape = ShapeUtil::MakeShape(F32, dimensions);
builder.AddInstruction(HloInstruction::CreateConcatenate(
shape, operands, test_config.concat_dimension));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Constant()));
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));
}
}
TEST_F(HloConstantFoldingTest, Slice) {
HloComputation::Builder builder(TestName());
const int64_t dimensions[] = {11, 8, 7, 5, 9};
const int64_t slice_start[] = {4, 2, 3, 1, 5};
const int64_t slice_limits[] = {10, 8, 6, 5, 9};
const int64_t slice_strides[] = {1, 1, 1, 1, 1};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
HloInstruction* literal_instruction = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
Shape shape = ShapeUtil::MakeShape(F32, {6, 6, 3, 4, 4});
builder.AddInstruction(HloInstruction::CreateSlice(
shape, literal_instruction, slice_start, slice_limits, slice_strides));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Constant()));
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));
}
TEST_F(HloConstantFoldingTest, TransposeConstantFold) {
HloComputation::Builder builder(TestName());
const int64_t dimensions[] = {11, 8, 7, 5, 9};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
auto literal_clone = literal.Clone();
HloInstruction* literal_instruction = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});
const int64_t permutation[] = {1, 2, 0, 4, 3};
builder.AddInstruction(
HloInstruction::CreateTranspose(shape, literal_instruction, permutation));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Constant()));
EXPECT_TRUE(ShapeUtil::Compatible(root->shape(), shape));
using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
bool matched = true;
root->literal().EachCell<NativeT>(
[&](absl::Span<const int64_t> indices, NativeT value) {
std::vector<int64_t> rindexes = PermuteInverse(indices, permutation);
matched = matched && (value == literal_clone.Get<NativeT>(rindexes));
});
EXPECT_TRUE(matched);
}
const char* const kConstantFoldReduce = R"(
HloModule ConstantFoldReduce
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = s32[] add(a, b)
}
ENTRY r {
x = s32[3] constant({1, 2, 3})
init = s32[] constant(0)
ROOT reduce = s32[] reduce(x, init), dimensions={0}, to_apply=add
})";
TEST_F(HloConstantFoldingTest, ConstantFoldReduce) {
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(kConstantFoldReduce));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
EXPECT_TRUE(result);
EXPECT_EQ(6, m->entry_computation()
->root_instruction()
->literal()
.GetFirstElement<int32_t>());
}
constexpr absl::string_view kConstantFoldReduceWithMetadata = R"(
HloModule ConstantFoldReduce
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = s32[] add(a, b)
}
ENTRY r {
x = s32[3] constant({1, 2, 3}), metadata={op_name="constant"}
init = s32[] constant(0), metadata={op_name="zero_constant"}
ROOT reduce = s32[] reduce(x, init), metadata={op_name="reduce"}, dimensions={0}, to_apply=add
})";
TEST_F(HloConstantFoldingTest, ConstantFoldReduceCheckMetadata) {
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(kConstantFoldReduceWithMetadata));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
EXPECT_TRUE(result);
OpMetadata reduce_metadata;
reduce_metadata.set_op_name("reduce");
EXPECT_THAT(m->entry_computation()->root_instruction(),
AllOf(op::Constant(), op::Metadata(reduce_metadata)));
}
TEST_F(HloConstantFoldingTest, ConstantFoldReduceNoLayout) {
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(kConstantFoldReduce));
HloInstruction* add = (*m->computations().begin())->root_instruction();
LayoutUtil::ClearLayout(add->mutable_shape());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
EXPECT_FALSE(result);
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reduce()));
}
const char* const kConstantFoldLargePad = R"(
HloModule ConstantFoldLargePad
ENTRY r {
a = f32[1,1,1] constant({{{7}}})
b = f32[] constant(42)
ROOT pad = f32[2048,2048,128] pad(a, b), padding=1024_1023x1024_1023x64_63
})";
TEST_F(HloConstantFoldingTest, DoesNotFoldLargePad) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kConstantFoldLargePad));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_FALSE(result);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Constant(), m::Constant())));
}
TEST_F(HloConstantFoldingTest, DoesNotFoldSlicesWithLargeOperand) {
const char* const kModuleStr = R"(
HloModule test
ENTRY r {
a = f32[] constant(42)
broadcast = f32[1000000000]{0} broadcast(a), dimensions={}
slice1 = f32[10000]{0} slice(broadcast), slice={[0:10000]}
slice2 = f32[10000]{0} slice(broadcast), slice={[10000:20000]}
ROOT add = f32[10000]{0} add(slice1, slice2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_FALSE(result);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Slice(), m::Slice())));
}
TEST_F(HloConstantFoldingTest, DontFoldSubcomputationContainingAfterAll) {
const char* const kModuleStr = R"(
HloModule test
Fn {
tok = token[] after-all()
ROOT root = f32[10] iota(), iota_dimension=0
}
ENTRY entry {
ROOT call = f32[10] call(), to_apply=Fn
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_FALSE(result);
}
TEST_F(HloConstantFoldingTest,
DontFoldSubcomputationTransitivelyContainingRng) {
const char* const kModuleStr = R"(
HloModule test
InnerFn {
c0 = f32[] constant(0)
c1 = f32[] constant(1)
ROOT rng = f32[10] rng(c0, c1), distribution=rng_uniform
}
Fn {
ROOT fusion = f32[10] fusion(), kind=kLoop, calls=InnerFn
}
ENTRY entry {
ROOT call = f32[10] call(), to_apply=Fn
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_FALSE(result);
}
TEST_F(HloConstantFoldingTest, FoldOpsWhereOneOperandIsBroadcast) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
not_folded1 = f32[4] broadcast(f32[] constant(1))
not_folded2 = add(f32[4] broadcast(f32[] constant(2)),
f32[4] broadcast(f32[] constant(3)))
folded1 = add(f32[4] broadcast(f32[] constant(5)),
f32[4] constant({0,1,2,3}))
folded2 = add(f32[4] constant({0,1,2,3}),
f32[4] broadcast(f32[] constant(5)))
ROOT root = tuple(not_folded1, not_folded2, folded1, folded2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Broadcast(m::Constant()),
m::Add(m::Broadcast(m::Constant()),
m::Broadcast(m::Constant())),
m::Constant(),
m::Constant()
)));
}
TEST_F(HloConstantFoldingTest, FoldInt4Ops) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
c0 = s4[2]{0:E(4)} constant({1, 2})
c1 = s4[2]{0:E(4)} constant({3, 4})
add1 = s4[2]{0:E(4)} add(c0, c1)
c2 = s4[]{:E(4)} constant(5)
add2 = s4[2]{0:E(4)} add(c0, s4[2]{0:E(4)} broadcast(c2))
ROOT root = tuple(add1, add2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_TRUE(result);
auto is_4_bit = [](const HloInstruction* instr) {
return instr->shape().layout().element_size_in_bits() == 4;
};
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Constant().WithPredicate(is_4_bit),
m::Constant().WithPredicate(is_4_bit))));
}
TEST_F(HloConstantFoldingTest, BigReduceWindow) {
constexpr absl::string_view kModuleStr = R"(
HloModule test
add_bf16 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
ENTRY accumulated_all_reduce {
x = bf16[160,10,10,512]{3,2,1,0} broadcast(bf16[] constant(1.0))
init = bf16[] constant(0)
ROOT reduce-window = reduce-window(x, init), window={size=1x2x2x1 stride=1x2x2x1}, to_apply=add_bf16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_TRUE(result);
}
TEST_F(HloConstantFoldingTest, TimingConsumingTest) {
constexpr absl::string_view mod_str = R"(
HloModule jit_f, entry_computation_layout={()->f32[]}
region_0.4 {
Arg_0.5 = f32[] parameter(0)
Arg_1.6 = f32[] parameter(1)
ROOT add.7 = f32[] add(Arg_0.5, Arg_1.6)
}
ENTRY main.9 {
constant.1 = f32[] constant(1)
broadcast.2 = f32[32,999,40,512]{3,2,1,0} broadcast(constant.1), dimensions={}
constant.3 = f32[] constant(0)
ROOT reduce.8 = f32[] reduce(broadcast.2, constant.3), dimensions={0,1,2,3}, to_apply=region_0.4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(mod_str));
HloConstantFolding const_fold;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&const_fold, module.get()));
EXPECT_FALSE(result);
}
}
} |
1,889 | cpp | tensorflow/tensorflow | gather_expander | third_party/xla/xla/service/gather_expander.cc | third_party/xla/xla/service/gather_expander_test.cc | #ifndef XLA_SERVICE_GATHER_EXPANDER_H_
#define XLA_SERVICE_GATHER_EXPANDER_H_
#include "xla/service/op_expander_pass.h"
namespace xla {
class GatherExpander : public OpExpanderPass {
public:
enum Mode {
kEliminateAllGathers,
kEliminateSimpleGathers,
};
explicit GatherExpander(Mode m) : mode_(m) {}
absl::string_view name() const override { return "gather_expander"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* gather_inst) override;
private:
Mode mode_;
};
}
#endif
#include "xla/service/gather_expander.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
namespace xla {
namespace {
absl::StatusOr<HloInstruction*> TransposeIndexVectorDimToLast(
HloInstruction* start_indices, int64_t index_vector_dim) {
const Shape& start_indices_shape = start_indices->shape();
if (start_indices_shape.dimensions_size() == index_vector_dim) {
return start_indices;
}
if (index_vector_dim == (start_indices_shape.dimensions_size() - 1)) {
return start_indices;
}
std::vector<int64_t> permutation;
permutation.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
permutation.push_back(i);
}
}
permutation.push_back(index_vector_dim);
return MakeTransposeHlo(start_indices, permutation);
}
absl::StatusOr<HloInstruction*> CanonicalizeGatherIndices(
HloInstruction* start_indices, int64_t index_vector_dim) {
TF_ASSIGN_OR_RETURN(
HloInstruction * transposed_start_indices,
TransposeIndexVectorDimToLast(start_indices, index_vector_dim));
bool indices_are_scalar =
index_vector_dim == start_indices->shape().dimensions_size();
const int64_t index_dims_in_start_indices = indices_are_scalar ? 0 : 1;
const Shape& shape = transposed_start_indices->shape();
if (shape.dimensions_size() == index_dims_in_start_indices) {
return PrependDegenerateDims(transposed_start_indices, 1);
} else {
return CollapseFirstNDims(
transposed_start_indices,
shape.dimensions_size() - index_dims_in_start_indices);
}
}
absl::StatusOr<HloInstruction*> AdjustBatchDimsInAccumulator(
const Shape& start_indices_shape, HloInstruction* accumulator,
int64_t index_vector_dim) {
std::vector<int64_t> batch_dim_bounds;
batch_dim_bounds.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
batch_dim_bounds.push_back(start_indices_shape.dimensions(i));
}
}
if (batch_dim_bounds.empty()) {
return ElideDegenerateDims(accumulator, {0});
}
return ExpandFirstDimIntoNDims(accumulator, batch_dim_bounds);
}
absl::StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace(
HloInstruction* index_vector, const GatherDimensionNumbers& dim_numbers,
int64_t operand_rank) {
HloComputation* computation = index_vector->parent();
const Shape& index_shape = index_vector->shape();
if (operand_rank == 0) {
return computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {0})));
}
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1})));
std::vector<HloInstruction*> expanded_index_components;
for (int i = 0; i < operand_rank; i++) {
int64_t index_vector_dim_index =
FindIndex(dim_numbers.start_index_map(), i);
if (index_vector_dim_index != dim_numbers.start_index_map_size()) {
TF_ASSIGN_OR_RETURN(
HloInstruction * component_to_concat,
MakeSliceHlo(index_vector, {index_vector_dim_index},
{index_vector_dim_index + 1},
{1}));
expanded_index_components.push_back(component_to_concat);
} else {
expanded_index_components.push_back(zero);
}
}
return MakeConcatHlo(expanded_index_components, 0);
}
absl::StatusOr<std::vector<HloInstruction*>> GatherLoopBody(
const HloInstruction& gather, HloInstruction* induction_var,
const std::vector<HloInstruction*>& incoming_loop_state) {
const GatherDimensionNumbers& dim_numbers = gather.gather_dimension_numbers();
CHECK_EQ(incoming_loop_state.size(), 3);
HloInstruction* const operand = incoming_loop_state[0];
HloInstruction* const start_indices = incoming_loop_state[1];
HloInstruction* const output_accumulator = incoming_loop_state[2];
bool has_scalar_indices = start_indices->shape().dimensions_size() == 1;
CHECK_EQ(has_scalar_indices,
dim_numbers.index_vector_dim() ==
gather.operand(1)->shape().dimensions_size());
HloInstruction* induction_var_as_vector =
MakeBroadcastHlo(induction_var, {},
{1});
HloInstruction* index_vector;
if (has_scalar_indices) {
TF_ASSIGN_OR_RETURN(
index_vector,
MakeDynamicSliceHlo(start_indices, induction_var_as_vector, {1}));
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * index_into_start_indices,
PadVectorWithZeros(induction_var_as_vector,
0, 1));
int64_t index_vector_size = start_indices->shape().dimensions(1);
TF_ASSIGN_OR_RETURN(
HloInstruction * index_vector_2d,
MakeDynamicSliceHlo(start_indices, index_into_start_indices,
{1, index_vector_size}));
TF_ASSIGN_OR_RETURN(index_vector,
ElideDegenerateDims(index_vector_2d, {0}));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * gathered_slice_start,
ExpandIndexVectorIntoOperandSpace(index_vector, dim_numbers,
operand->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(HloInstruction * gathered_slice,
MakeDynamicSliceHlo(operand, gathered_slice_start,
gather.gather_slice_sizes()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_with_dims_collapsed,
ElideDegenerateDims(gathered_slice, dim_numbers.collapsed_slice_dims()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_for_update,
PrependDegenerateDims(gathered_slice_with_dims_collapsed, 1));
TF_ASSIGN_OR_RETURN(
HloInstruction* const index_vector_into_accumulator,
PadVectorWithZeros(
induction_var_as_vector, 0,
gathered_slice_with_dims_collapsed->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const updated_accumulator,
MakeDynamicUpdateSliceHlo(output_accumulator, gathered_slice_for_update,
index_vector_into_accumulator));
return absl::StatusOr<std::vector<HloInstruction*>>{
{operand, start_indices, updated_accumulator}};
}
HloInstruction* CreateGatherLoopAccumulatorInitValue(
HloComputation* computation, PrimitiveType element_type,
absl::Span<const int64_t> slice_sizes, int64_t gather_loop_trip_count,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64_t> accumulator_state_shape_dims;
accumulator_state_shape_dims.reserve(1 + slice_sizes.size());
accumulator_state_shape_dims.push_back(gather_loop_trip_count);
for (int64_t i = 0; i < slice_sizes.size(); i++) {
if (!absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
accumulator_state_shape_dims.push_back(slice_sizes[i]);
}
}
return BroadcastZeros(computation, element_type,
accumulator_state_shape_dims);
}
absl::StatusOr<HloInstruction*> PermuteBatchAndOffsetDims(
HloInstruction* accumulator, absl::Span<const int64_t> offset_dims,
int64_t output_rank) {
std::vector<int64_t> permutation;
permutation.reserve(output_rank);
int64_t batch_idx_counter = 0;
int64_t offset_idx_counter = output_rank - offset_dims.size();
for (int64_t i = 0; i < output_rank; i++) {
bool is_offset_dim = absl::c_binary_search(offset_dims, i);
if (is_offset_dim) {
permutation.push_back(offset_idx_counter++);
} else {
permutation.push_back(batch_idx_counter++);
}
}
return MakeTransposeHlo(accumulator, permutation);
}
int64_t GatherLoopTripCount(HloInstruction* gather_instr) {
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& start_indices_shape = start_indices->shape();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t trip_count = 1;
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != dim_numbers.index_vector_dim()) {
trip_count *= start_indices_shape.dimensions(i);
}
}
return trip_count;
}
int64_t GatherIsBroadcast(HloInstruction* gather_instr) {
return absl::c_equal(gather_instr->gather_slice_sizes(),
gather_instr->operand(0)->shape().dimensions());
}
}
absl::StatusOr<HloInstruction*> GatherExpander::ExpandInstruction(
HloInstruction* gather_instr) {
CHECK(!ShapeUtil::IsZeroElementArray(gather_instr->shape()));
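  // A gather whose slice sizes span the entire operand replicates the operand
  // for every index, so it can be lowered to a reshape plus a broadcast.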
if (GatherIsBroadcast(gather_instr)) {
if (ShapeUtil::IsZeroElementArray(gather_instr->operand(0)->shape())) {
return MakeScalarLike(gather_instr, 0);
}
Shape broadcast_operand_shape = ShapeUtil::DeleteDimensions(
gather_instr->gather_dimension_numbers().collapsed_slice_dims(),
gather_instr->operand(0)->shape());
TF_ASSIGN_OR_RETURN(HloInstruction * broadcast_operand,
MakeReshapeHlo(broadcast_operand_shape,
gather_instr->mutable_operand(0)));
gather_instr->SetupDerivedInstruction(broadcast_operand);
HloInstruction* broadcast =
MakeBroadcastHlo(broadcast_operand,
gather_instr->gather_dimension_numbers().offset_dims(),
gather_instr->shape());
gather_instr->SetupDerivedInstruction(broadcast);
return broadcast;
}
HloComputation* computation = gather_instr->parent();
HloInstruction* operand = gather_instr->mutable_operand(0);
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& output_shape = gather_instr->shape();
int64_t output_rank = output_shape.dimensions_size();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t gather_loop_trip_count = GatherLoopTripCount(gather_instr);
if (!IsInt32(gather_loop_trip_count)) {
return Unimplemented(
"Gather operations with more than 2147483647 gather indices are not "
"supported. This error occurred for %s.",
gather_instr->ToString());
}
TF_ASSIGN_OR_RETURN(
HloInstruction * canonical_start_indices,
CanonicalizeGatherIndices(start_indices, dim_numbers.index_vector_dim()));
CHECK_EQ(gather_loop_trip_count,
canonical_start_indices->shape().dimensions(0));
HloInstruction* accumulator_init = CreateGatherLoopAccumulatorInitValue(
computation, output_shape.element_type(),
gather_instr->gather_slice_sizes(), gather_loop_trip_count,
gather_instr->gather_dimension_numbers());
absl::StatusOr<std::vector<HloInstruction*>> gather_loop_result_or_error =
WhileUtil::MakeCountedLoop(
computation, gather_loop_trip_count,
{operand, canonical_start_indices, accumulator_init},
[&](HloInstruction* indvar,
const std::vector<HloInstruction*>& loop_state) {
return GatherLoopBody(*gather_instr, indvar, loop_state);
},
gather_instr->metadata());
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> gather_loop_result,
gather_loop_result_or_error);
HloInstruction* accumulator_result = gather_loop_result.back();
TF_ASSIGN_OR_RETURN(
HloInstruction* const accumulator_with_batch_dims_decanonicalized,
AdjustBatchDimsInAccumulator(start_indices->shape(), accumulator_result,
dim_numbers.index_vector_dim()));
return PermuteBatchAndOffsetDims(accumulator_with_batch_dims_decanonicalized,
dim_numbers.offset_dims(), output_rank);
}
bool GatherExpander::InstructionMatchesPattern(HloInstruction* inst) {
return inst->opcode() == HloOpcode::kGather &&
!ShapeUtil::IsZeroElementArray(inst->shape()) &&
(mode_ == kEliminateAllGathers || GatherLoopTripCount(inst) == 1 ||
absl::c_equal(inst->gather_slice_sizes(),
inst->operand(0)->shape().dimensions()));
}
} | #include "xla/service/gather_expander.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace {
using GatherExpanderTest = HloTestBase;
TEST_F(GatherExpanderTest, ErrorStatusOnTooManyIndices) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherMultipleBatchDims
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2147483647,5] parameter(1)
ROOT gather = s32[2147483647,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
absl::Status status = GatherExpander{GatherExpander::kEliminateAllGathers}
.Run(module.get())
.status();
EXPECT_EQ(status.code(), tsl::error::UNIMPLEMENTED);
ASSERT_THAT(
status.message(),
::testing::HasSubstr("Gather operations with more than 2147483647 gather "
"indices are not supported."));
}
TEST_F(GatherExpanderTest, AvoidDegenerateDims) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
const Shape& while_shape = while_instr->shape();
ASSERT_TRUE(while_shape.IsTuple());
ASSERT_EQ(ShapeUtil::TupleElementCount(while_shape), 4);
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {3, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 1)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2}),
ShapeUtil::GetTupleElementShape(while_shape, 2)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 3)));
}
TEST_F(GatherExpanderTest, CheckOpMetadata) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
OpMetadata metadata;
metadata.set_op_name("Gather");
module->entry_computation()->root_instruction()->set_metadata(metadata);
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
EXPECT_EQ(while_instr->metadata().op_name(), "Gather");
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersSkipsNontrivialGather) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_FALSE(changed);
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersRewritesTrivialGather) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[100] parameter(0)
indices = s32[1] parameter(1)
ROOT gather = s32[10] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=0,
slice_sizes={10}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateAllGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
}
TEST_F(GatherExpanderTest, GatherIsBroadcast) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[1,3] parameter(0)
indices = s32[7,5] parameter(1)
ROOT gather = s32[7,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
ASSERT_TRUE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kBroadcast}));
module->VerifyOrAddFailure("after-gather-expander.");
}
}
} |
1,890 | cpp | tensorflow/tensorflow | stable_sort_expander | third_party/xla/xla/service/stable_sort_expander.cc | third_party/xla/xla/service/stable_sort_expander_test.cc | #ifndef XLA_SERVICE_STABLE_SORT_EXPANDER_H_
#define XLA_SERVICE_STABLE_SORT_EXPANDER_H_
#include <cstdint>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
class StableSortExpander : public OpExpanderPass {
public:
absl::string_view name() const override { return "stable-sort-expander"; }
static int64_t IotaOperandIndexForStableSort(const HloSortInstruction& sort);
private:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/stable_sort_expander.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
int64_t StableSortExpander::IotaOperandIndexForStableSort(
const HloSortInstruction& sort) {
for (const HloInstruction* operand : sort.operands()) {
if (operand->opcode() == HloOpcode::kIota &&
Cast<HloIotaInstruction>(operand)->iota_dimension() ==
sort.sort_dimension() &&
operand->shape().element_type() == S32) {
return sort.operand_index(operand);
}
}
return -1;
}
absl::StatusOr<HloInstruction*> StableSortExpander::ExpandInstruction(
HloInstruction* instruction) {
auto* sort = Cast<HloSortInstruction>(instruction);
HloComputation* computation = sort->parent();
HloInstruction* expanded_sort = nullptr;
absl::flat_hash_set<int64_t> used_indices;
int64_t iota_index = IotaOperandIndexForStableSort(*sort);
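  // If the sort has no iota operand along the sort dimension, append one (and
  // widen the comparator accordingly) so ties can be broken by each element's
  // original position.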
if (iota_index == -1) {
Shape iota_shape = sort->operand(0)->shape();
if (iota_shape.dimensions(sort->sort_dimension()) >
std::numeric_limits<int32_t>::max()) {
return Unimplemented(
"Stable sorting of more than 2^31-1 elements is not implemented");
}
iota_shape.set_element_type(S32);
auto iota = computation->AddInstruction(
HloInstruction::CreateIota(iota_shape, sort->sort_dimension()));
auto comparator = sort->to_apply();
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
std::vector<std::unique_ptr<HloInstruction>> extra_parameters;
std::vector<HloInstruction*> extra_parameter_ptrs;
Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
extra_parameters.push_back(HloInstruction::CreateParameter(
sort->operand_count() * 2, scalar_shape,
absl::StrCat("p.", sort->operand_count(), ".lhs")));
extra_parameter_ptrs.push_back(extra_parameters.back().get());
extra_parameters.push_back(HloInstruction::CreateParameter(
sort->operand_count() * 2 + 1, scalar_shape,
absl::StrCat("p.", sort->operand_count(), ".rhs")));
extra_parameter_ptrs.push_back(extra_parameters.back().get());
sort->set_to_apply(sort->GetModule()->AddEmbeddedComputation(
comparator->CloneWithReplacements(&replacements,
extra_parameter_ptrs)));
std::vector<HloInstruction*> new_operands(sort->operands().begin(),
sort->operands().end());
new_operands.push_back(iota);
std::vector<Shape> new_shapes = sort->operand_count() == 1
? std::vector<Shape>{sort->shape()}
: sort->shape().tuple_shapes();
new_shapes.push_back(iota_shape);
Shape new_sort_shape = ShapeUtil::MakeTupleShape(new_shapes);
HloInstruction* new_sort = computation->AddInstruction(
sort->CloneWithNewOperands(new_sort_shape, new_operands));
std::vector<HloInstruction*> tuple_elements;
tuple_elements.reserve(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
tuple_elements.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
sort->operand(i)->shape(), new_sort, i)));
}
expanded_sort = tuple_elements[0];
if (tuple_elements.size() > 1) {
expanded_sort = computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
}
sort = Cast<HloSortInstruction>(new_sort);
iota_index = sort->operand_count() - 1;
}
auto comparator = sort->to_apply();
std::vector<HloInstruction*> instructions_postorder =
comparator->MakeInstructionPostOrder();
absl::flat_hash_map<HloInstruction*, HloInstruction*> replacements;
auto replace = [&](HloInstruction* instr) {
auto it = replacements.find(instr);
if (it == replacements.end()) {
return instr;
}
return it->second;
};
HloInstruction* old_root = comparator->root_instruction();
for (int64_t i = 0; i < comparator->num_parameters(); ++i) {
replacements[comparator->parameter_instruction(i)] =
comparator->parameter_instruction(i ^ 1);
}
HloInstruction* cloned_root = nullptr;
for (HloInstruction* inst : instructions_postorder) {
if (inst->operand_count() == 0) {
continue;
}
std::vector<HloInstruction*> new_operands;
new_operands.reserve(inst->operand_count());
for (HloInstruction* operand : inst->operands()) {
new_operands.push_back(replace(operand));
}
auto new_instruction =
inst->CloneWithNewOperands(inst->shape(), new_operands);
replacements[inst] = new_instruction.get();
if (inst == old_root) {
cloned_root = new_instruction.get();
}
comparator->AddInstruction(std::move(new_instruction));
}
CHECK_NE(cloned_root, nullptr);
Shape scalar_pred = ShapeUtil::MakeShape(PRED, {});
HloInstruction* same =
comparator->AddInstruction(HloInstruction::CreateCompare(
scalar_pred, old_root, cloned_root, ComparisonDirection::kEq));
HloInstruction* tie_breaker =
comparator->AddInstruction(HloInstruction::CreateCompare(
scalar_pred, comparator->parameter_instruction(2 * iota_index),
comparator->parameter_instruction(2 * iota_index + 1),
ComparisonDirection::kLt));
HloInstruction* new_root =
comparator->AddInstruction(HloInstruction::CreateTernary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kSelect, same, tie_breaker,
old_root));
comparator->set_root_instruction(new_root);
return expanded_sort;
}
bool StableSortExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kSort &&
Cast<HloSortInstruction>(instruction)->is_stable();
}
} | #include "xla/service/stable_sort_expander.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using StableSortExpanderTest = HloTestBase;
bool IsSameComputationExceptParams(const HloInstruction* a,
const HloInstruction* b) {
if (a->opcode() != b->opcode() || a->operand_count() != b->operand_count()) {
return false;
}
if (a->opcode() == HloOpcode::kParameter) {
return a->parameter_number() == (b->parameter_number() ^ 1);
}
if (a->operand_count() == 0) {
return a == b;
}
for (int64_t i = 0; i < a->operand_count(); ++i) {
if (!IsSameComputationExceptParams(a->operand(i), b->operand(i))) {
return false;
}
}
return true;
}
void CheckComputationHasTieBreaker(const HloInstruction* root,
int64_t iota_parameter) {
ASSERT_EQ(root->opcode(), HloOpcode::kSelect);
ASSERT_EQ(root->operand(0)->opcode(), HloOpcode::kCompare);
ASSERT_EQ(root->operand(0)->comparison_direction(), ComparisonDirection::kEq);
EXPECT_THAT(root->operand(1),
GmockMatch(m::Lt(m::Parameter(iota_parameter * 2),
m::Parameter(iota_parameter * 2 + 1))));
EXPECT_EQ(root->operand(2), root->operand(0)->operand(0));
EXPECT_TRUE(IsSameComputationExceptParams(root->operand(0)->operand(0),
root->operand(0)->operand(1)));
}
TEST_F(StableSortExpanderTest, StabilizeSortReuseIotaOperand) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest,
StabilizeSortReuseIotaOperandComplicatedComparison) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
max = u32[] constant(2147483647)
zero = s32[] constant(0)
lhs.signed = s32[] bitcast-convert(p.0.lhs)
lhs.unsigned = u32[] bitcast-convert(p.0.lhs)
lhs.flipped = u32[] subtract(max, lhs.unsigned)
lhs.flipped.signed = s32[] bitcast-convert(lhs.flipped)
lhs.is_negative = pred[] compare(lhs.flipped.signed, zero), direction=LT
lhs.converted = s32[] select(lhs.is_negative, lhs.flipped.signed, lhs.signed)
rhs.signed = s32[] bitcast-convert(p.0.rhs)
rhs.unsigned = u32[] bitcast-convert(p.0.rhs)
rhs.flipped = u32[] subtract(max, rhs.unsigned)
rhs.flipped.signed = s32[] bitcast-convert(rhs.flipped)
rhs.is_negative = pred[] compare(rhs.flipped.signed, zero), direction=LT
rhs.converted = s32[] select(rhs.is_negative, rhs.flipped.signed, rhs.signed)
ROOT lt = pred[] compare(lhs.converted, rhs.converted), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest, StabilizeSortAddIotaOperandAndChangeRoot) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
ROOT sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, GmockMatch(m::Tuple(
m::GetTupleElement(
m::Sort(m::Parameter(0), m::Parameter(1), m::Iota()), 0),
m::GetTupleElement(
m::Sort(m::Parameter(0), m::Parameter(1), m::Iota()), 1))));
CheckComputationHasTieBreaker(
      root->operand(0)->operand(0)->to_apply()->root_instruction(), 2);
}
TEST_F(StableSortExpanderTest, HonorIsStableFlag) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=false
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_FALSE(stabilizer.Run(module.get()).value());
}
TEST_F(StableSortExpanderTest,
StabilizeSortDontReuseIotaOperandWrongDimension) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=0
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
AlgebraicSimplifier simplifier(AlgebraicSimplifierOptions(
[](const Shape&, const Shape&) { return false; }));
ASSERT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota(), m::Iota()), 0)));
CheckComputationHasTieBreaker(
      root->operand(0)->to_apply()->root_instruction(), 2);
}
TEST_F(StableSortExpanderTest, StabilizeSortDontReuseIotaOperandWrongType) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = f32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, f32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
AlgebraicSimplifier simplifier(AlgebraicSimplifierOptions(
[](const Shape&, const Shape&) { return false; }));
ASSERT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota(), m::Iota()), 0)));
CheckComputationHasTieBreaker(
      root->operand(0)->to_apply()->root_instruction(), 2);
}
TEST_F(StableSortExpanderTest, StabilizeSortR1) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = s32[] parameter(0)
p.0.rhs = s32[] parameter(1)
mask = s32[] constant(65535)
lhs = s32[] and(p.0.lhs, mask)
rhs = s32[] and(p.0.rhs, mask)
ROOT lt = pred[] compare(lhs, rhs), direction=LT
}
ENTRY sort_computation {
keys = s32[64,8732]{1,0} parameter(0)
ROOT sort = s32[64,8732]{1,0} sort(keys), dimensions={0}, to_apply=compare,
is_stable=true
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest, StabilizeSortR1NoRoot) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = s32[] parameter(0)
p.0.rhs = s32[] parameter(1)
mask = s32[] constant(65535)
lhs = s32[] and(p.0.lhs, mask)
rhs = s32[] and(p.0.rhs, mask)
ROOT lt = pred[] compare(lhs, rhs), direction=LT
}
ENTRY sort_computation {
keys = s32[64,8732]{1,0} parameter(0)
sort = s32[64,8732]{1,0} sort(keys), dimensions={0}, to_apply=compare,
is_stable=true
ROOT neg = s32[64,8732]{1,0} negate(sort)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Negate(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0))));
CheckComputationHasTieBreaker(
      root->operand(0)->operand(0)->to_apply()->root_instruction(), 1);
}
}
} |
1,891 | cpp | tensorflow/tensorflow | collectives_schedule_linearizer | third_party/xla/xla/service/collectives_schedule_linearizer.cc | third_party/xla/xla/service/collectives_schedule_linearizer_test.cc | #ifndef XLA_SERVICE_COLLECTIVES_SCHEDULE_LINEARIZER_H_
#define XLA_SERVICE_COLLECTIVES_SCHEDULE_LINEARIZER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/util.h"
namespace xla {
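// Pass that inserts control dependencies between collective instructions in
// each computation so that they execute in a single, well-defined order.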
class CollectivesScheduleLinearizer : public HloModulePass {
public:
explicit CollectivesScheduleLinearizer(HloModulePredicate is_enabled = {})
: is_enabled_(is_enabled) {}
absl::string_view name() const override {
return "collectives-schedule-linearizer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
HloModulePredicate is_enabled_;
};
}
#endif
#include "xla/service/collectives_schedule_linearizer.h"
#include <algorithm>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::StatusOr<bool> CollectivesScheduleLinearizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (is_enabled_ && !is_enabled_(module)) {
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
std::unique_ptr<HloReachabilityMap> reachability;
HloInstruction* prev_done = nullptr;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
auto* next = DynCast<HloCollectiveInstruction>(inst);
if (!next) {
continue;
}
if (!reachability) {
reachability = HloReachabilityMap::Build(computation);
}
HloInstruction* start = next;
HloInstruction* done = next;
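      // Asynchronous collectives are chained via their matching *-done op,
      // which is the single user of the start instruction.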
switch (next->opcode()) {
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllGatherStart:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kAsyncStart:
CHECK_EQ(start->user_count(), 1);
done = start->users()[0];
break;
default:
break;
}
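      // Order this collective after the previous one with a control edge,
      // unless the two are already connected by an existing dependency.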
if (prev_done && !reachability->IsConnected(start, prev_done)) {
TF_RETURN_IF_ERROR(prev_done->AddControlDependencyTo(next));
VLOG(1) << "Adding control dependency from " << prev_done->ToString()
<< " to " << start->ToString();
changed = true;
}
prev_done = done;
}
}
return changed;
}
} | #include "xla/service/collectives_schedule_linearizer.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace m = match;
int64_t CountControlEdges(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
count += instruction->control_successors().size();
}
return count;
}
class CollectivesScheduleLinearizerTest : public HloTestBase {
protected:
void InsertCollectivesSchedule(HloModule* module) {
CollectivesScheduleLinearizer collectives_schedule_linearizer;
ASSERT_IS_OK(collectives_schedule_linearizer.Run(module).status());
}
};
TEST_F(CollectivesScheduleLinearizerTest, FixOrdering) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
HloInstruction *c1 = nullptr, *c2 = nullptr;
for (HloInstruction* instr : module->entry_computation()->instructions()) {
if (Match(instr, m::AllReduce(m::Parameter(0)))) {
c1 = instr;
}
if (Match(instr, m::AllReduce(m::Parameter(1)))) {
c2 = instr;
}
}
EXPECT_TRUE(c1 != nullptr && c2 != nullptr);
EXPECT_TRUE(absl::c_linear_search(c2->control_predecessors(), c1));
}
TEST_F(CollectivesScheduleLinearizerTest, NoFixRequired) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum, control-predecessors={c1}
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
}
TEST_F(CollectivesScheduleLinearizerTest, DependentCollectives) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(c1), replica_groups={}, to_apply=sum
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 0);
}
TEST_F(CollectivesScheduleLinearizerTest, NonPostorder) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
c3 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
t = f32[100] add(c1, c2)
ROOT out = f32[100] add(t, c3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(
module->entry_computation()
->GetInstructionWithName("c3")
->AddControlDependencyTo(
module->entry_computation()->GetInstructionWithName("c1")));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 2);
}
TEST_F(CollectivesScheduleLinearizerTest, AsyncOrdering) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
ars0 = f32[100] all-reduce-start(p0), replica_groups={}, to_apply=sum
ard0 = f32[100] all-reduce-done(ars0)
ars1 = f32[100] all-reduce-start(p1), replica_groups={}, to_apply=sum
ard1 = f32[100] all-reduce-done(ars1)
ROOT out = f32[100] add(ard0, ard1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
const HloInstruction *root = module->entry_computation()->root_instruction();
const HloInstruction *ard0 = root->operand(0);
const HloInstruction *ard1 = root->operand(1);
EXPECT_EQ(ard0->opcode(), HloOpcode::kAllReduceDone);
EXPECT_EQ(ard1->opcode(), HloOpcode::kAllReduceDone);
const HloInstruction *ars1 = ard1->operand(0);
EXPECT_EQ(ars1->opcode(), HloOpcode::kAllReduceStart);
EXPECT_TRUE(absl::c_linear_search(ars1->control_predecessors(), ard0));
}
}
} |
1,892 | cpp | tensorflow/tensorflow | hlo_liveness_analysis | third_party/xla/xla/service/hlo_liveness_analysis.cc | third_party/xla/xla/service/hlo_liveness_analysis_test.cc | #ifndef XLA_SERVICE_HLO_LIVENESS_ANALYSIS_H_
#define XLA_SERVICE_HLO_LIVENESS_ANALYSIS_H_
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
namespace xla {
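// Analysis that computes, for every instruction and shape index, whether the
// value produced there can reach the entry root or a side-effecting
// instruction (i.e. whether it is "live").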
class HloLivenessAnalysis {
public:
using HloIndexMap = absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<ShapeTree<bool>>>;
static absl::StatusOr<std::unique_ptr<HloLivenessAnalysis>> Run(
const HloModule& module);
bool IsLive(const HloInstruction* instruction,
const ShapeIndex& shape_index) const;
private:
HloLivenessAnalysis(const HloModule& module);
void RunAnalysis();
const HloModule& module_;
std::unique_ptr<CallGraph> call_graph_;
HloIndexMap live_index_map_;
};
}
#endif
#include "xla/service/hlo_liveness_analysis.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
using Worklist = std::deque<const HloInstruction*>;
using Workset = absl::flat_hash_set<const HloInstruction*>;
void AddToWorklist(const HloInstruction* instruction, Worklist* worklist,
Workset* workset) {
if (workset->insert(instruction).second) {
worklist->push_back(instruction);
VLOG(3) << "ADD instruction: " << instruction->name();
}
}
using VisitorFunction = absl::FunctionRef<void(const ShapeIndex& )>;
void ForEachLiveIndex(const ShapeTree<bool>& index_tree, VisitorFunction func) {
index_tree.ForEachElement([&](const ShapeIndex& shape_index, bool live) {
if (live) {
func(shape_index);
}
});
}
void MarkLiveAtIndex(const HloInstruction* instruction,
const ShapeIndex& shape_index,
HloLivenessAnalysis::HloIndexMap* live_index_map,
Worklist* worklist, Workset* workset) {
std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
if (liveness == nullptr) {
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(), false);
}
bool& alive = *liveness->mutable_element(shape_index);
if (!alive) {
AddToWorklist(instruction, worklist, workset);
alive = true;
VLOG(3) << "MARK instruction: " << instruction->name()
<< " shape_index: " << shape_index;
}
}
void MarkLiveAtAllIndices(const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map,
Worklist* worklist, Workset* workset) {
bool add_to_worklist = false;
std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
if (liveness == nullptr) {
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(), true);
add_to_worklist = true;
} else {
for (auto& entry : *liveness) {
if (!entry.second) {
add_to_worklist = true;
entry.second = true;
VLOG(3) << "MARK instruction: " << instruction->name()
<< " shape_index: " << entry.first;
}
}
}
if (add_to_worklist) {
AddToWorklist(instruction, worklist, workset);
}
}
void PropagateLivenessThroughTuple(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset) {
CHECK_EQ(instruction->opcode(), HloOpcode::kTuple);
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
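  // A live index {i, rest...} on the tuple makes operand i live both at the
  // top level and at sub-index {rest...}.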
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
const size_t size = shape_index.size();
if (size == 0) {
return;
}
const int64_t operand_index = shape_index[0];
if (operand_index >= instruction->operand_count()) {
return;
}
MarkLiveAtIndex(instruction->operand(operand_index), {}, live_index_map,
worklist, workset);
ShapeIndex operand_shape_index(size - 1);
for (int i = 1; i < size; ++i) {
operand_shape_index[i - 1] = shape_index[i];
}
MarkLiveAtIndex(instruction->operand(operand_index), operand_shape_index,
live_index_map, worklist, workset);
});
}
void PropagateLivenessThroughGTE(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset) {
CHECK_EQ(instruction->opcode(), HloOpcode::kGetTupleElement);
MarkLiveAtIndex(instruction->operand(0), {}, live_index_map, worklist,
workset);
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
ShapeIndex operand_shape_index(shape_index);
operand_shape_index.push_front(instruction->tuple_index());
MarkLiveAtIndex(instruction->operand(0), operand_shape_index,
live_index_map, worklist, workset);
});
}
void PropagateLivenessThroughWhile(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset) {
CHECK_EQ(instruction->opcode(), HloOpcode::kWhile);
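  // Liveness of the while result flows into the body root and the init
  // operand at the same shape indices; the condition root is always live.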
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
MarkLiveAtIndex(instruction->while_body()->root_instruction(), shape_index,
live_index_map, worklist, workset);
MarkLiveAtIndex(instruction->operand(0), shape_index, live_index_map,
worklist, workset);
});
MarkLiveAtIndex(instruction->while_condition()->root_instruction(), {},
live_index_map, worklist, workset);
}
void PropagateLivenessToParameterCallers(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset, CallGraph* call_graph) {
CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
const CallGraphNode& call_graph_node =
call_graph->GetNode(instruction->parent());
if (call_graph_node.context() == CallContext::kControlFlow) {
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
auto* xla_while = callsite.instruction();
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
MarkLiveAtIndex(xla_while, shape_index, live_index_map, worklist,
workset);
MarkLiveAtIndex(xla_while->while_body()->root_instruction(),
shape_index, live_index_map, worklist, workset);
MarkLiveAtIndex(xla_while->operand(0), shape_index, live_index_map,
worklist, workset);
});
}
}
}
}
void PropagateLivenessThroughControlFlow(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset, CallGraph* call_graph) {
const CallGraphNode& call_graph_node =
call_graph->GetNode(instruction->parent());
if (call_graph_node.context() == CallContext::kControlFlow) {
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
HloInstruction* caller = callsite.instruction();
if (caller->opcode() == HloOpcode::kWhile) {
MarkLiveAtIndex(caller->while_condition()->root_instruction(), {},
live_index_map, worklist, workset);
} else if (caller->opcode() == HloOpcode::kConditional) {
MarkLiveAtIndex(caller->operand(0), {}, live_index_map, worklist,
workset);
MarkLiveAtIndex(caller, {}, live_index_map, worklist, workset);
const HloComputation* callee_comp = instruction->parent();
int64_t operand_index = 1;
for (auto* caller_comp : caller->called_computations()) {
if (callee_comp == caller_comp) {
MarkLiveAtIndex(caller->operand(operand_index), {}, live_index_map,
worklist, workset);
if (instruction->opcode() == HloOpcode::kParameter) {
const ShapeTree<bool>& index_tree =
*live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
MarkLiveAtIndex(caller->operand(operand_index), shape_index,
live_index_map, worklist, workset);
});
}
break;
}
++operand_index;
}
}
}
}
}
}
HloLivenessAnalysis::HloLivenessAnalysis(const HloModule& module)
: module_(module), call_graph_(CallGraph::Build(&module)) {}
void HloLivenessAnalysis::RunAnalysis() {
Worklist worklist;
Workset workset;
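  // Seed liveness at the entry root and at all side-effecting instructions,
  // then propagate backwards through operands until a fixed point is reached.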
MarkLiveAtAllIndices(module_.entry_computation()->root_instruction(),
&live_index_map_, &worklist, &workset);
for (auto* computation : module_.computations()) {
for (auto* instruction : computation->instructions()) {
if (instruction->HasSideEffectNoRecurse()) {
MarkLiveAtAllIndices(instruction, &live_index_map_, &worklist,
&workset);
}
}
}
while (!worklist.empty()) {
const HloInstruction* instruction = worklist.front();
worklist.pop_front();
workset.erase(workset.find(instruction));
VLOG(1) << "VISIT instruction: " << instruction->name();
if (instruction->opcode() == HloOpcode::kTuple) {
PropagateLivenessThroughTuple(instruction, &live_index_map_, &worklist,
&workset);
} else if (instruction->opcode() == HloOpcode::kGetTupleElement) {
PropagateLivenessThroughGTE(instruction, &live_index_map_, &worklist,
&workset);
} else if (instruction->opcode() == HloOpcode::kWhile) {
PropagateLivenessThroughWhile(instruction, &live_index_map_, &worklist,
&workset);
} else if (instruction->opcode() == HloOpcode::kParameter) {
PropagateLivenessToParameterCallers(instruction, &live_index_map_,
&worklist, &workset,
call_graph_.get());
} else {
for (auto* called_computation : instruction->called_computations()) {
MarkLiveAtAllIndices(called_computation->root_instruction(),
&live_index_map_, &worklist, &workset);
}
for (HloInstruction* operand : instruction->operands()) {
MarkLiveAtAllIndices(operand, &live_index_map_, &worklist, &workset);
}
}
PropagateLivenessThroughControlFlow(instruction, &live_index_map_,
&worklist, &workset, call_graph_.get());
}
}
bool HloLivenessAnalysis::IsLive(const HloInstruction* instruction,
const ShapeIndex& shape_index) const {
auto it = live_index_map_.find(instruction);
return (it != live_index_map_.end()) && it->second->element(shape_index);
}
absl::StatusOr<std::unique_ptr<HloLivenessAnalysis>> HloLivenessAnalysis::Run(
const HloModule& module) {
VLOG(1) << "HloLivenessAnalysis::Run on module " << module.name();
XLA_VLOG_LINES(2, module.ToString());
auto liveness_analysis = absl::WrapUnique(new HloLivenessAnalysis(module));
liveness_analysis->RunAnalysis();
return std::move(liveness_analysis);
}
} | #include "xla/service/hlo_liveness_analysis.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloLivenessAnalysisTest : public HloTestBase {
protected:
HloLivenessAnalysisTest() {}
const HloLivenessAnalysis& RunLiveness(HloModule* module) {
liveness_ = HloLivenessAnalysis::Run(*module).value();
return *liveness_;
}
HloInstruction* GetInstruction(HloModule* module, const std::string& name) {
HloInstruction* to_return = nullptr;
for (auto* comp : module->computations()) {
for (auto* inst : comp->instructions()) {
if (inst->name() == name) {
to_return = inst;
break;
}
}
}
return CHECK_NOTNULL(to_return);
}
std::unique_ptr<HloLivenessAnalysis> liveness_;
};
TEST_F(HloLivenessAnalysisTest, AddAtEntryRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
ROOT add = s32[] add(constant.1, constant.2)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
TEST_F(HloLivenessAnalysisTest, DeadAdd) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
add.1 = s32[] add(constant.1, constant.2)
ROOT add.2 = s32[] add(constant.1, constant.2)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {}));
}
TEST_F(HloLivenessAnalysisTest, TupleAtEntryRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
ROOT tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
TEST_F(HloLivenessAnalysisTest, NestedTupleAtEntryRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(1)
constant.2 = s32[] constant(2)
constant.3 = s32[] constant(3)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
ROOT tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, GteOfTuple) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)
ROOT get-tuple-element.1 = s32[] get-tuple-element(tuple.1), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
TEST_F(HloLivenessAnalysisTest, GteOfNestedTuple) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
constant.3 = s32[] constant(2)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
ROOT get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {0}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, GteOfGteOfNestedTuple) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
constant.3 = s32[] constant(2)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1
ROOT get-tuple-element.2 = s32[] get-tuple-element(get-tuple-element.1), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {0}));
EXPECT_FALSE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
EXPECT_FALSE(
liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, WhileWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while.0), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.4"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.0"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {}));
}
TEST_F(HloLivenessAnalysisTest, WhileCondPropagatesLiveness) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
add_S32 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
get-tuple-element.4 = s32[3]{0} get-tuple-element(loop_var.2), index=1
zero = s32[] constant(0)
reduce = s32[] reduce(get-tuple-element.4, zero), dimensions={0}, to_apply=add_S32
add.1 = s32[] add(get-tuple-element.3, reduce)
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(add.1, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.5 = s32[] get-tuple-element(while.0), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.4"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {}));
}
TEST_F(HloLivenessAnalysisTest, WhileWithLiveTupleElements) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
add.1 = s32[] add(get-tuple-element.1, get-tuple-element.2)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.1), index=2
multiply.1 = s32[] multiply(get-tuple-element.3, get-tuple-element.3)
ROOT tuple.1 = (s32[], s32[], s32[]) tuple(add.1, get-tuple-element.3, multiply.1)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0
constant.1 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.4, constant.1), direction=LT
}
ENTRY SimpleLoop {
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
constant.4 = s32[] constant(2)
tuple.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.3, constant.4)
while.1 = (s32[], s32[], s32[]) while(tuple.2), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.5 = s32[] get-tuple-element(while.1), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {2}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {2}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {2}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {2}));
}
TEST_F(HloLivenessAnalysisTest, WhileWithOutfeed) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule OutfeedLoop
WhileBody {
body_param = (s32[]) parameter(0)
token0 = token[] after-all()
constant.2 = s32[] constant(2)
outfeed_tuple = (s32[]) outfeed(constant.2, token0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[]) tuple(add)
}
WhileCondition {
cond_param = (s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
tuple.1 = (s32[]) tuple(constant.3)
while = (s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT rtuple = () tuple()
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, NestedWhileWithOutfeed) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule OutfeedLoop
InnerWhileBody {
body_param = (s32[]) parameter(0)
token0 = token[] after-all()
constant.2 = s32[] constant(2)
outfeed_tuple = (s32[]) outfeed(constant.2, token0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[]) tuple(add)
}
InnerWhileCondition {
cond_param = (s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
OuterWhileCondition {
cond_param.2 = (s32[]) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(cond_param.2), index=0
constant.5 = s32[] constant(5)
ROOT less-than.2 = pred[] compare(get-tuple-element.5, constant.5), direction=LT
}
OuterWhileBody {
body_param.2 = (s32[]) parameter(0)
get-tuple-element.8 = s32[] get-tuple-element(body_param.2), index=0
constant.6 = s32[] constant(0)
tuple.2 = (s32[]) tuple(constant.6)
inner_while = (s32[]) while(tuple.2), condition=InnerWhileCondition,
body=InnerWhileBody
constant.7 = s32[] constant(1)
add.2 = s32[] add(get-tuple-element.8, constant.7)
ROOT rtuple = (s32[]) tuple(add.2)
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
tuple.1 = (s32[]) tuple(constant.3)
while = (s32[]) while(tuple.1), condition=OuterWhileCondition,
body=OuterWhileBody
ROOT rtuple = () tuple()
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, PropagateLivenessFromConditionalComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule main.67
%region_0.10 (Arg_0.11: (s32[], s32[], f32[1024,3], s32[1])) -> (s32[], s32[], f32[1024,3], s32[1]) {
%Arg_0.11 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)
%get-tuple-element.17 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=0, metadata={op_name="while"}
%constant.13 = s32[] constant(1)
%add.25 = s32[] add(s32[] %get-tuple-element.17, s32[] %constant.13), metadata={op_name="while/add_1"}
%get-tuple-element.18 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=1, metadata={op_name="while"}
%add.22 = s32[] add(s32[] %get-tuple-element.18, s32[] %constant.13), metadata={op_name="while/add"}
%get-tuple-element.19 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=2, metadata={op_name="while"}
%constant.16 = f32[] constant(0)
%constant.15 = f32[] constant(1)
%rng.21 = f32[3]{0} rng(f32[] %constant.16, f32[] %constant.15), distribution=rng_uniform, metadata={op_name="while/random_uniform/RandomUniform"}
%reshape.23 = f32[1,3]{1,0} reshape(f32[3]{0} %rng.21), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"}
%constant.12 = s32[] constant(0)
%dynamic-update-slice.24 = f32[1024,3]{1,0} dynamic-update-slice(f32[1024,3]{1,0} %get-tuple-element.19, f32[1,3]{1,0} %reshape.23, s32[] %get-tuple-element.18, s32[] %constant.12), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"}
%get-tuple-element.20 = s32[1]{0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=3, metadata={op_name="while"}
ROOT %tuple.26 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %add.25, s32[] %add.22, f32[1024,3]{1,0} %dynamic-update-slice.24, s32[1]{0} %get-tuple-element.20), metadata={op_name="while"}
}
%region_1.27 (Arg_0.28: (s32[], s32[], f32[1024,3], s32[1])) -> pred[] {
%Arg_0.28 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)
%get-tuple-element.30 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.28), index=1, metadata={op_name="while"}
%constant.29 = s32[] constant(1024)
ROOT %compare.31 = pred[] compare(s32[] %get-tuple-element.30, s32[] %constant.29), direction=LT, metadata={op_name="while/Less"}
}
%region_2.42 (Arg_0.43: (f32[3,32,32,3], token[])) -> (pred[], token[]) {
%constant.44 = pred[] constant(true)
%Arg_0.43 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)
%get-tuple-element.52 = f32[3,32,32,3]{3,2,1,0} get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=0, metadata={op_name="image_sample/write_summary/summary_cond"}
%constant.49 = f32[] constant(255.5)
%broadcast.50 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.49), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"}
%multiply.53 = f32[3,32,32,3]{3,2,1,0} multiply(f32[3,32,32,3]{3,2,1,0} %get-tuple-element.52, f32[3,32,32,3]{3,2,1,0} %broadcast.50), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"}
%constant.47 = f32[] constant(0)
%broadcast.48 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.47), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"}
%maximum.54 = f32[3,32,32,3]{3,2,1,0} maximum(f32[3,32,32,3]{3,2,1,0} %multiply.53, f32[3,32,32,3]{3,2,1,0} %broadcast.48), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"}
%constant.45 = f32[] constant(255)
%broadcast.46 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.45), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"}
%minimum.55 = f32[3,32,32,3]{3,2,1,0} minimum(f32[3,32,32,3]{3,2,1,0} %maximum.54, f32[3,32,32,3]{3,2,1,0} %broadcast.46), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"}
%convert.56 = u8[3,32,32,3]{3,2,1,0} convert(f32[3,32,32,3]{3,2,1,0} %minimum.55), metadata={op_name="image_sample/write_summary/summary_cond/convert_image"}
%get-tuple-element.51 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=1, metadata={op_name="image_sample/write_summary/summary_cond"}
%send.57 = (u8[3,32,32,3]{3,2,1,0}, u32[], token[]) send(u8[3,32,32,3]{3,2,1,0} %convert.56, token[] %get-tuple-element.51), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"}
%send-done.58 = token[] send-done((u8[3,32,32,3]{3,2,1,0}, u32[], token[]) %send.57), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"}
ROOT %tuple.59 = (pred[], token[]) tuple(pred[] %constant.44, token[] %send-done.58), metadata={op_name="image_sample/write_summary/summary_cond"}
}
%region_3.60 (Arg_0.61: (f32[3,32,32,3], token[])) -> (pred[], token[]) {
%constant.62 = pred[] constant(false)
%Arg_0.61 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)
%get-tuple-element.63 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.61), index=1, metadata={op_name="image_sample/write_summary/summary_cond"}
ROOT %tuple.64 = (pred[], token[]) tuple(pred[] %constant.62, token[] %get-tuple-element.63), metadata={op_name="image_sample/write_summary/summary_cond"}
}
ENTRY %main.67 (arg_tuple.1: (s32[])) -> () {
%arg_tuple.1 = (s32[]{:T(256)}) parameter(0)
%get-tuple-element.2 = s32[]{:T(256)} get-tuple-element((s32[]{:T(256)}) %arg_tuple.1), index=0
%constant.3 = s32[] constant(0)
%compare.8 = pred[]{:T(256)} compare(s32[]{:T(256)} %get-tuple-element.2, s32[] %constant.3), direction=EQ, metadata={op_name="image_sample/write_summary/Equal"}
%constant.5 = f32[] constant(0)
%broadcast.6 = f32[1024,3]{1,0} broadcast(f32[] %constant.5), dimensions={}, metadata={op_name="tokens_accumulator"}
%constant.4 = s32[1]{0} constant({1024})
%tuple.9 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %constant.3, s32[] %constant.3, f32[1024,3]{1,0} %broadcast.6, s32[1]{0} %constant.4), metadata={op_name="while"}
%while.32 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) while((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %tuple.9), condition=%region_1.27, body=%region_0.10, metadata={op_name="while"}
%get-tuple-element.33 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %while.32), index=2, metadata={op_name="while"}
%transpose.34 = f32[3,1024]{0,1} transpose(f32[1024,3]{1,0} %get-tuple-element.33), dimensions={1,0}, metadata={op_name="transpose.transpose/perm"}
%reshape.35 = f32[3,32,32,1]{3,2,1,0} reshape(f32[3,1024]{0,1} %transpose.34), metadata={op_name="Reshape"}
%broadcast.36 = f32[3,32,32,1]{3,2,1,0} broadcast(f32[3,32,32,1]{3,2,1,0} %reshape.35), dimensions={0,1,2,3}, metadata={op_name="Tile"}
%reshape.37 = f32[3,32,32]{2,1,0} reshape(f32[3,32,32,1]{3,2,1,0} %broadcast.36), metadata={op_name="Tile"}
%broadcast.38 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[3,32,32]{2,1,0} %reshape.37), dimensions={0,1,2}, metadata={op_name="Tile"}
%after-all.7 = token[] after-all(), metadata={op_name="image_sample/write_summary/summary_cond"}
%send.39 = (pred[]{:T(256)}, u32[], token[]) send(pred[]{:T(256)} %compare.8, token[] %after-all.7), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"}
%send-done.40 = token[] send-done((pred[]{:T(256)}, u32[], token[]) %send.39), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"}
%tuple.41 = (f32[3,32,32,3]{3,2,1,0}, token[]) tuple(f32[3,32,32,3]{3,2,1,0} %broadcast.38, token[] %send-done.40), metadata={op_name="image_sample/write_summary/summary_cond"}
%conditional.65 = (pred[], token[]) conditional(pred[]{:T(256)} %compare.8, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41), true_computation=%region_2.42, false_computation=%region_3.60, metadata={op_name="image_sample/write_summary/summary_cond"}
ROOT %tuple.66 = () tuple()
}
)")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "conditional.65"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.41"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.33"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.32"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "dynamic-update-slice.24"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "send.57"), {}));
}
}
} |
1,893 | cpp | tensorflow/tensorflow | transfer_manager | third_party/xla/xla/service/transfer_manager.cc | third_party/xla/xla/tests/transfer_manager_test.cc | #ifndef XLA_SERVICE_TRANSFER_MANAGER_H_
#define XLA_SERVICE_TRANSFER_MANAGER_H_
#include <cstdint>
#include <functional>
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/xla_data.pb.h"
namespace xla {
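// Platform-specific interface for transferring literals between host memory
// and device buffers, and for infeed/outfeed transfers.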
class TransferManager {
public:
virtual ~TransferManager() = default;
virtual se::Platform::Id PlatformId() const = 0;
virtual Shape HostShapeToDeviceShape(const Shape& host_shape) const {
return ShapeUtil::DeviceShapeToHostShape(host_shape);
}
class TransferMetadata {
public:
virtual ~TransferMetadata() = default;
};
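  // Reads the contents of 'device_buffer' back into a host literal. The
  // non-async overloads are expected to block until the transfer completes.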
absl::StatusOr<Literal> TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
const TransferMetadata* transfer_metadata = nullptr);
absl::Status TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
const MutableBorrowingLiteral& literal,
const TransferMetadata* transfer_metadata = nullptr);
virtual void TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
MutableBorrowingLiteral literal, std::function<void(absl::Status)> done,
const TransferMetadata* transfer_metadata) = 0;
void TransferLiteralFromDevice(se::Stream* stream,
const ShapedBuffer& device_buffer,
MutableBorrowingLiteral literal,
std::function<void(absl::Status)> done) {
return TransferLiteralFromDevice(stream, device_buffer, literal, done,
nullptr);
}
absl::Status TransferLiteralToDevice(
se::Stream* stream, const LiteralSlice& literal,
const ShapedBuffer& device_buffer,
const TransferMetadata* transfer_metadata = nullptr);
virtual absl::Status TransferLiteralToDeviceAsync(
se::Stream* stream, const LiteralSlice& literal,
const ShapedBuffer& device_buffer,
const TransferMetadata* transfer_metadata) = 0;
absl::Status TransferLiteralToDeviceAsync(se::Stream* stream,
const LiteralSlice& literal,
const ShapedBuffer& device_buffer) {
return TransferLiteralToDeviceAsync(stream, literal, device_buffer,
nullptr);
}
absl::Status TransferArrayToDevice(
se::Stream* stream, const LiteralSlice& literal,
const se::DeviceMemoryBase& dest,
const TransferMetadata* transfer_metadata = nullptr);
absl::Status TransferArrayToDeviceAsync(
se::Stream* stream, const LiteralSlice& literal,
const se::DeviceMemoryBase& dest,
const TransferMetadata* transfer_metadata = nullptr);
absl::StatusOr<Literal> TransferArrayFromDevice(
se::Stream* stream, const Shape& shape,
const se::DeviceMemoryBase& source,
const TransferMetadata* transfer_metadata = nullptr);
virtual absl::Status ReadDynamicShapes(se::Stream* stream,
const ShapedBuffer* device_buffer,
Shape* device_shape);
virtual absl::Status TransferLiteralToInfeed(se::StreamExecutor* executor,
const LiteralSlice& literal) = 0;
virtual absl::Status TransferLiteralFromOutfeed(
se::StreamExecutor* executor, MutableBorrowingLiteral literal) = 0;
virtual absl::Status ResetDevices(
absl::Span<se::StreamExecutor* const> executor) = 0;
absl::Status WriteTupleIndexTables(se::Stream* stream,
const ShapedBuffer& device_buffer);
absl::Status WriteTupleIndexTablesAsync(se::Stream* stream,
const ShapedBuffer& device_buffer);
absl::Status WriteRootTupleIndexTable(se::Stream* stream,
const ShapedBuffer& device_buffer);
absl::Status WriteRootTupleIndexTable(
se::Stream* stream,
const ShapeTree<MaybeOwningDeviceMemory>& buffer_tree);
virtual int64_t GetByteSizeRequirement(const Shape& shape) const = 0;
virtual absl::StatusOr<Shape> ChooseCompactLayoutForShape(
const Shape& host_shape) const;
virtual Shape ChooseGoodInfeedLayout(const Shape& shape) const;
typedef std::function<Shape(const Shape&)> DeviceShapeRepresentationFn;
absl::StatusOr<ScopedShapedBuffer> AllocateScopedShapedBuffer(
const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
int device_ordinal,
DeviceShapeRepresentationFn shape_representation_fn = nullptr);
virtual bool CanShapedBufferBeAccessedNow(
se::StreamExecutor* executor, const ShapedBuffer& device_buffer) const {
return false;
}
virtual bool CanBufferBeAccessedNow(
se::StreamExecutor* executor,
const se::DeviceMemoryBase& device_buffer) const {
return false;
}
typedef std::unique_ptr<TransferManager> (*TransferManagerCreationFunction)();
static void RegisterTransferManager(
se::Platform::Id platform_id,
TransferManagerCreationFunction transfer_manager);
static absl::StatusOr<TransferManager*> GetForPlatform(
const se::Platform* platform);
virtual absl::Status WriteSingleTupleIndexTable(
se::Stream* stream, absl::Span<const se::DeviceMemoryBase> elements,
const Shape& shape, se::DeviceMemoryBase* region) = 0;
virtual bool PackSubbyteTypes() const { return false; }
private:
static absl::Mutex platform_transfer_manager_mutex_;
struct State {
std::unique_ptr<TransferManager> manager;
TransferManagerCreationFunction creation_function = nullptr;
};
static absl::flat_hash_map<se::Platform::Id, State>*
GetPlatformTransferManagers();
};
}
#endif
#include "xla/service/transfer_manager.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/const_init.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/literal.h"
#include "xla/service/compiler.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::Mutex TransferManager::platform_transfer_manager_mutex_(
absl::kConstInit);
absl::flat_hash_map<se::Platform::Id, TransferManager::State>*
TransferManager::GetPlatformTransferManagers() {
static auto* r =
new absl::flat_hash_map<se::Platform::Id, TransferManager::State>;
return r;
}
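// Synchronous convenience wrappers around the virtual asynchronous overload:
// they allocate (or borrow) a destination literal, run the copy on a
// substream, and block until the completion callback fires.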
absl::StatusOr<Literal> TransferManager::TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
const TransferMetadata* transfer_metadata) {
Literal literal(device_buffer.on_host_shape());
TF_RETURN_IF_ERROR(TransferLiteralFromDevice(stream, device_buffer, &literal,
transfer_metadata));
return std::move(literal);
}
absl::Status TransferManager::TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
const MutableBorrowingLiteral& literal,
const TransferMetadata* transfer_metadata) {
TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
TF_RETURN_IF_ERROR(substream->WaitFor(stream));
absl::Cleanup cleanup = [&]() { stream->ReturnSubStream(substream); };
absl::Status ret;
tsl::Notification n;
TransferLiteralFromDevice(
substream, device_buffer, literal,
[&](absl::Status status) {
ret = status;
n.Notify();
},
transfer_metadata);
n.WaitForNotification();
return ret;
}
absl::Status TransferManager::TransferLiteralToDevice(
se::Stream* stream, const LiteralSlice& literal,
const ShapedBuffer& device_buffer,
const TransferMetadata* transfer_metadata) {
TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
TF_RETURN_IF_ERROR(substream->WaitFor(stream));
absl::Cleanup cleanup = [&]() { stream->ReturnSubStream(substream); };
TF_RETURN_IF_ERROR(TransferLiteralToDeviceAsync(
substream, literal, device_buffer, transfer_metadata));
return substream->BlockHostUntilDone();
}
absl::StatusOr<Literal> TransferManager::TransferArrayFromDevice(
se::Stream* stream, const Shape& shape, const se::DeviceMemoryBase& source,
const TransferMetadata* transfer_metadata) {
TF_RET_CHECK(shape.IsArray());
TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
HostShapeToDeviceShape(shape), shape));
Literal literal(shape);
ShapedBuffer shaped_buffer(shape, stream->parent()->device_ordinal());
shaped_buffer.set_buffer(source, {});
TF_RETURN_IF_ERROR(TransferLiteralFromDevice(stream, shaped_buffer, &literal,
transfer_metadata));
return std::move(literal);
}
absl::Status TransferManager::TransferArrayToDevice(
se::Stream* stream, const LiteralSlice& literal,
const se::DeviceMemoryBase& dest,
const TransferMetadata* transfer_metadata) {
TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
TF_RETURN_IF_ERROR(substream->WaitFor(stream));
absl::Cleanup cleanup = [&]() { stream->ReturnSubStream(substream); };
TF_RETURN_IF_ERROR(
TransferArrayToDeviceAsync(substream, literal, dest, transfer_metadata));
return substream->BlockHostUntilDone();
}
absl::Status TransferManager::TransferArrayToDeviceAsync(
se::Stream* stream, const LiteralSlice& literal,
const se::DeviceMemoryBase& dest,
const TransferMetadata* transfer_metadata) {
TF_RET_CHECK(literal.shape().IsArray());
ShapedBuffer shaped_buffer(HostShapeToDeviceShape(literal.shape()),
stream->parent()->device_ordinal());
shaped_buffer.set_buffer(dest, {});
return TransferLiteralToDeviceAsync(stream, literal, shaped_buffer,
transfer_metadata);
}
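// Recovers the actual sizes of dynamic dimensions by reading, for every array
// subshape, the S32 metadata that backends store after the static array data
// in the same buffer, then marks the shape static.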
absl::Status TransferManager::ReadDynamicShapes(
se::Stream* stream, const ShapedBuffer* device_buffer,
Shape* device_shape) {
DCHECK(device_shape->is_dynamic());
Shape original_device_shape = *device_shape;
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
TF_ASSIGN_OR_RETURN(
auto compiler, Compiler::GetForPlatform(stream->parent()->GetPlatform()));
TF_RETURN_IF_ERROR(device_buffer->buffers().ForEachElementWithStatus(
[&](const ShapeIndex& index,
const se::DeviceMemoryBase& buffer) -> absl::Status {
const Shape& buffer_shape =
ShapeUtil::GetSubshape(*device_shape, index);
if (buffer_shape.IsTuple()) {
return absl::OkStatus();
}
Shape& device_sub_shape =
*ShapeUtil::GetMutableSubshape(device_shape, index);
if (device_sub_shape.is_static()) {
return absl::OkStatus();
}
auto shape_size_fn = compiler->ShapeSizeBytesFunction();
Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape);
const int64_t offset = shape_size_fn(buffer_shape_static);
int64_t metadata_size = shape_size_fn(buffer_shape) - offset;
if (metadata_size == 0) {
return InvalidArgument("Dynamic shape metadata size should not be 0");
}
auto buffer_8 = se::DeviceMemory<uint8_t>(buffer);
auto metadata_buffer = buffer_8.GetSlice(offset, metadata_size);
TF_ASSIGN_OR_RETURN(
auto metadata,
TransferArrayFromDevice(
stream,
ShapeUtil::MakeShape(S32, {buffer_shape.dimensions_size()}),
metadata_buffer));
for (int64_t i = 0; i < metadata.element_count(); ++i) {
device_sub_shape.mutable_dimensions()[i] = metadata.Get<int32_t>({i});
}
return absl::OkStatus();
}));
device_shape->clear_dynamic_dimensions();
TF_RET_CHECK(ShapeUtil::DynamicShapeIsCompatible(*device_shape,
original_device_shape));
return absl::OkStatus();
}
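// Registers a factory that creates the TransferManager for a platform; each
// backend invokes this from a static initializer. A minimal illustrative
// sketch (the platform id and class below are hypothetical, not part of this
// file):
//
//   static bool registered = [] {
//     xla::TransferManager::RegisterTransferManager(
//         kMyPlatformId,  // hypothetical se::Platform::Id
//         +[]() -> std::unique_ptr<xla::TransferManager> {
//           return std::make_unique<MyTransferManager>();  // hypothetical
//         });
//     return true;
//   }();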
void TransferManager::RegisterTransferManager(
se::Platform::Id platform_id,
TransferManagerCreationFunction creation_function) {
absl::MutexLock lock(&TransferManager::platform_transfer_manager_mutex_);
auto* managers = GetPlatformTransferManagers();
CHECK(managers->find(platform_id) == managers->end());
(*managers)[platform_id].creation_function = creation_function;
}
absl::StatusOr<TransferManager*> TransferManager::GetForPlatform(
const se::Platform* platform) {
absl::MutexLock lock(&TransferManager::platform_transfer_manager_mutex_);
auto* managers = GetPlatformTransferManagers();
auto it = managers->find(platform->id());
if (it == managers->end()) {
return NotFound(
"could not find registered transfer manager for platform %s -- check "
"target linkage",
platform->Name());
}
if (it->second.manager == nullptr) {
it->second.manager = (*it->second.creation_function)();
}
return it->second.manager.get();
}
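// A tuple is represented on device as a table of pointers to its element
// buffers. The helpers below write those tables for every tuple subshape (or
// just the root tuple); the *Async variants only enqueue the writes.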
absl::Status TransferManager::WriteTupleIndexTables(
se::Stream* stream, const ShapedBuffer& device_buffer) {
TF_RETURN_IF_ERROR(WriteTupleIndexTablesAsync(stream, device_buffer));
return stream->BlockHostUntilDone();
}
absl::Status TransferManager::WriteTupleIndexTablesAsync(
se::Stream* stream, const ShapedBuffer& device_buffer) {
VLOG(2) << "Writing tuple index tables for " << device_buffer;
return ShapeUtil::ForEachSubshapeWithStatus(
device_buffer.on_device_shape(),
[&](const Shape& device_subshape,
const ShapeIndex& index) -> absl::Status {
if (device_subshape.IsTuple() &&
ShapeUtil::TupleElementCount(device_subshape) > 0) {
se::DeviceMemoryBase device_memory = device_buffer.buffer(index);
TF_RET_CHECK(GetByteSizeRequirement(device_subshape) ==
device_memory.size());
std::vector<se::DeviceMemoryBase> elements;
ShapeIndex element_index = index;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(device_subshape);
++i) {
element_index.push_back(i);
elements.push_back(device_buffer.buffer(element_index));
element_index.pop_back();
}
return WriteSingleTupleIndexTable(stream, elements, device_subshape,
&device_memory);
}
return absl::OkStatus();
});
}
absl::Status TransferManager::WriteRootTupleIndexTable(
se::Stream* stream, const ShapedBuffer& device_buffer) {
TF_RET_CHECK(device_buffer.on_device_shape().IsTuple());
if (ShapeUtil::TupleElementCount(device_buffer.on_device_shape()) == 0) {
return absl::OkStatus();
}
se::DeviceMemoryBase device_memory = device_buffer.buffer({});
TF_RET_CHECK(GetByteSizeRequirement(device_buffer.on_device_shape()) ==
device_memory.size());
std::vector<se::DeviceMemoryBase> elements;
for (int64_t i = 0;
i < ShapeUtil::TupleElementCount(device_buffer.on_device_shape()); ++i) {
elements.push_back(device_buffer.buffer({i}));
}
return WriteSingleTupleIndexTable(
stream, elements, device_buffer.on_device_shape(), &device_memory);
}
absl::Status TransferManager::WriteRootTupleIndexTable(
se::Stream* stream, const ShapeTree<MaybeOwningDeviceMemory>& buffer_tree) {
TF_RET_CHECK(buffer_tree.shape().IsTuple());
if (ShapeUtil::TupleElementCount(buffer_tree.shape()) == 0) {
return absl::OkStatus();
}
se::DeviceMemoryBase device_memory =
buffer_tree.element({}).AsDeviceMemoryBase();
TF_RET_CHECK(GetByteSizeRequirement(buffer_tree.shape()) ==
device_memory.size());
std::vector<se::DeviceMemoryBase> elements;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(buffer_tree.shape());
++i) {
elements.push_back(buffer_tree.element({i}).AsDeviceMemoryBase());
}
return WriteSingleTupleIndexTable(stream, elements, buffer_tree.shape(),
&device_memory);
}
absl::StatusOr<ScopedShapedBuffer> TransferManager::AllocateScopedShapedBuffer(
const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
int device_ordinal, DeviceShapeRepresentationFn shape_representation_fn) {
if (!LayoutUtil::HasLayout(on_host_shape)) {
return InvalidArgument("Shape must have a layout: %s",
ShapeUtil::HumanStringWithLayout(on_host_shape));
}
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));
Shape on_device_shape = (shape_representation_fn == nullptr)
? HostShapeToDeviceShape(on_host_shape)
: shape_representation_fn(on_host_shape);
TF_RET_CHECK(LayoutUtil::HasLayout(on_device_shape));
ScopedShapedBuffer shaped_buffer(std::move(on_device_shape), allocator,
device_ordinal);
for (auto& pair : shaped_buffer.buffers()) {
const ShapeIndex& index = pair.first;
se::DeviceMemoryBase& memory_base = pair.second;
const Shape& subshape =
ShapeUtil::GetSubshape(shaped_buffer.on_device_shape(), index);
TF_ASSIGN_OR_RETURN(auto memory,
allocator->Allocate(shaped_buffer.device_ordinal(),
GetByteSizeRequirement(subshape),
true,
LayoutUtil::MemorySpace(subshape)));
memory_base = memory.Release();
}
return std::move(shaped_buffer);
}
absl::StatusOr<Shape> TransferManager::ChooseCompactLayoutForShape(
const Shape& host_shape) const {
return LayoutUtil::GetWithDefaultLayout(host_shape);
}
xla::Shape TransferManager::ChooseGoodInfeedLayout(const Shape& shape) const {
return LayoutUtil::GetWithDefaultLayout(shape);
}
} | #include <memory>
#include <cstdint>
#include <functional>
#include <numeric>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/generic_transfer_manager.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/stream_pool.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/local_client_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class TransferManagerTest : public LocalClientTestBase {
protected:
TransferManagerTest()
: shape_size_fn_([this](const Shape& shape) {
return transfer_manager_->GetByteSizeRequirement(shape);
}) {
stream_ptr_ = local_client_->mutable_backend()
->BorrowStream(stream_executor_)
.value();
stream_ = stream_ptr_.get();
}
~TransferManagerTest() override = default;
ScopedShapedBuffer AllocateDeviceBuffer(const Shape& shape) {
return transfer_manager_
->AllocateScopedShapedBuffer(
shape, GetOrCreateAllocator(local_client_->platform()),
0)
.value();
}
protected:
StreamPool::Ptr stream_ptr_;
se::Stream* stream_;
private:
std::function<int64_t(const Shape&)> shape_size_fn_;
};
XLA_TEST_F(TransferManagerTest, TransferR0U32) {
Literal literal = LiteralUtil::CreateR0<uint32_t>(42);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR0Equal<uint32_t>(42, result);
}
XLA_TEST_F(TransferManagerTest, TransferR1F32) {
Literal literal =
LiteralUtil::CreateR1<float>({1.25f, 2.5f, -17.0f, -20.125f});
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR1Equal<float>({1.25f, 2.5f, -17.0f, -20.125f},
result);
}
XLA_TEST_F(TransferManagerTest, TransferR1F32AwkwardSizes) {
constexpr int kMaxR1Size = (1 << 11);
for (int i = 0; i < kMaxR1Size; ++i) {
std::vector<float> inputs(i);
std::iota(inputs.begin(), inputs.end(), 0);
Literal literal = LiteralUtil::CreateR1<float>(inputs);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR1Equal<float>(inputs, result);
}
}
XLA_TEST_F(TransferManagerTest, TransferR1LargeF32) {
std::vector<float> test_vector(1024 * 1024);
std::iota(test_vector.begin(), test_vector.end(), 0);
Literal literal = LiteralUtil::CreateR1<float>(test_vector);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR1Equal<float>(test_vector, result);
}
XLA_TEST_F(TransferManagerTest, TransferR1LargeUnalignedF32) {
std::vector<float> test_vector(1025);
std::iota(test_vector.begin(), test_vector.end(), 0);
Shape shape = ShapeUtil::MakeShape(F32, {1024});
BorrowingLiteral literal(reinterpret_cast<const char*>(&test_vector[1]),
shape);
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
std::vector<float> expected_output(1024);
std::iota(expected_output.begin(), expected_output.end(), 1);
LiteralTestUtil::ExpectR1Equal<float>(expected_output, result);
}
XLA_TEST_F(TransferManagerTest, TransferR1U8) {
const char* test_string = "0123456789abcdef";
Literal literal = LiteralUtil::CreateR1U8(test_string);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_EQ(result.GetR1U8AsString(), test_string);
}
XLA_TEST_F(TransferManagerTest, TransferR2F32) {
Literal literal =
LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR2Equal<float>(
{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, result);
}
XLA_TEST_F(TransferManagerTest,
TransferR2F32AndChangeLayoutTransferringToDevice) {
Literal literal = LiteralUtil::CreateR2WithLayout<float>(
{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, LayoutUtil::MakeLayout({0, 1}));
const Shape ondevice_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 3}, {1, 0});
auto device_buffer = AllocateDeviceBuffer(ondevice_shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_FALSE(
LayoutUtil::Equal(result.shape().layout(), literal.shape().layout()));
LiteralTestUtil::ExpectR2Equal<float>(
{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, result);
}
XLA_TEST_F(TransferManagerTest, TransferTuple) {
Literal literal = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(123.0f),
LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferEmptyTuple) {
Literal literal = LiteralUtil::MakeTuple({});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferNestedTuple) {
Literal literal = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(123.0f),
LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})}),
LiteralUtil::CreateR1<float>({-10.0f, 123.0f})});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferComplexValue) {
Literal literal = LiteralUtil::CreateR1<complex64>(
{complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferComplexValueInTuple) {
Literal literal = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR1<complex64>(
{complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)}),
LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6}),
LiteralUtil::CreateR0<complex64>(complex64(0.3f, -0.4f))});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferTokenFromDevice) {
auto device_buffer = AllocateDeviceBuffer(ShapeUtil::MakeTokenShape());
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateToken(), result));
}
XLA_TEST_F(TransferManagerTest, OVERSIZE_ON_GRM(MultiStreamRoundTripSoak)) {
const int64_t kIterationCount = 5000;
Literal literal1 = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(123.0f),
LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})}),
LiteralUtil::CreateR1<float>({-10.0f, 123.0f})});
Literal literal2 = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(456.0f),
LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR2<float>({{5.0f, 7.0f}, {9.0f, 4.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -11.0f, 3333333.3f})}),
LiteralUtil::CreateR1<float>({-98.0f, 153.0f})});
auto device_buffer1 = AllocateDeviceBuffer(literal1.shape());
auto device_buffer2 = AllocateDeviceBuffer(literal2.shape());
auto stream1 = stream_;
auto stream2 = stream_->GetOrCreateSubStream().value();
Literal result1, result2;
for (int i = 0; i < kIterationCount; ++i) {
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream1, literal1,
device_buffer1));
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream2, literal2,
device_buffer2));
TF_ASSERT_OK_AND_ASSIGN(
Literal this_result1,
transfer_manager_->TransferLiteralFromDevice(stream1, device_buffer1));
TF_ASSERT_OK_AND_ASSIGN(
Literal this_result2,
transfer_manager_->TransferLiteralFromDevice(stream2, device_buffer2));
result1 = std::move(this_result1);
result2 = std::move(this_result2);
}
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, result1));
EXPECT_TRUE(LiteralTestUtil::Equal(literal2, result2));
}
XLA_TEST_F(TransferManagerTest, DISABLED_ON_TPU(TransferDynamicShape)) {
TF_ASSERT_OK_AND_ASSIGN(
Shape s, ParseShape("(s64[], s32[<=1048576,3], f32[<=1048576,48])"));
Literal literal(s);
literal.SetDynamicSize(0, {1},
1048574);
literal.SetDynamicSize(0, {2},
1048575);
ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {0})
.Populate<int64_t>(
[](absl::Span<const int64_t> indices) { return 42; }));
ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {1})
.Populate<int32_t>([](absl::Span<const int64_t> indices) {
return indices[0] + indices[1];
}));
ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {2})
.Populate<float>([](absl::Span<const int64_t> indices) {
return indices[0] + indices[1];
}));
ScopedShapedBuffer device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_EQ(literal.GetDynamicSize(0, {1}),
result.GetDynamicSize(0, {1}));
EXPECT_EQ(literal.GetDynamicSize(0, {2}),
result.GetDynamicSize(0, {2}));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
class TransferDeviceToHostBenchmark : public TransferManagerTest {
public:
using TransferManagerTest::TransferManagerTest;
~TransferDeviceToHostBenchmark() override {}
void Run(::testing::benchmark::State& state, int num_tuple_elements,
int array_size) {
SetUp();
std::vector<Literal> tuple_elements;
for (int i = 0; i < num_tuple_elements; ++i) {
tuple_elements.push_back(
LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
}
Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
auto device_buffer = AllocateDeviceBuffer(literal.shape());
TF_CHECK_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
for (auto s : state) {
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
}
TearDown();
}
void TestBody() override {}
};
class TransferHostToDeviceBenchmark : public TransferManagerTest {
public:
using TransferManagerTest::TransferManagerTest;
~TransferHostToDeviceBenchmark() override {}
void Run(::testing::benchmark::State& state, int num_tuple_elements,
int array_size) {
SetUp();
std::vector<Literal> tuple_elements;
for (int i = 0; i < num_tuple_elements; ++i) {
tuple_elements.push_back(
LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
}
Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
auto device_buffer = AllocateDeviceBuffer(literal.shape());
for (auto s : state) {
TF_CHECK_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
}
TearDown();
}
void TestBody() override {}
};
void BM_TransferDeviceToHost(::testing::benchmark::State& state) {
const int num_tuple_elements = state.range(0);
const int array_size = state.range(1);
TransferDeviceToHostBenchmark bm;
bm.Run(state, num_tuple_elements, array_size);
}
void BM_TransferHostToDevice(::testing::benchmark::State& state) {
const int num_tuple_elements = state.range(0);
const int array_size = state.range(1);
TransferHostToDeviceBenchmark bm;
bm.Run(state, num_tuple_elements, array_size);
}
BENCHMARK(BM_TransferHostToDevice)
->ArgPair(1, 256)
->ArgPair(1, 257)
->ArgPair(100, 256)
->ArgPair(100, 257);
BENCHMARK(BM_TransferDeviceToHost)
->ArgPair(1, 256)
->ArgPair(1, 257)
->ArgPair(100, 256)
->ArgPair(100, 257);
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
tsl::testing::RunBenchmarks();
return RUN_ALL_TESTS();
}
}
} |
1,894 | cpp | tensorflow/tensorflow | multi_output_fusion | third_party/xla/xla/service/gpu/transforms/multi_output_fusion.cc | third_party/xla/xla/service/gpu/transforms/multi_output_fusion_test.cc | #ifndef XLA_SERVICE_GPU_MULTI_OUTPUT_FUSION_H_
#define XLA_SERVICE_GPU_MULTI_OUTPUT_FUSION_H_
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_dfs_reachability.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
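// GPU multi-output fusion pass: merges sibling fusions that share an operand
// and fuses producers into consumers as additional fusion outputs whenever a
// cost model predicts the combined kernel reduces memory traffic without
// slowing execution down.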
class GpuMultiOutputFusion : public HloModulePass {
public:
explicit GpuMultiOutputFusion(
const se::DeviceDescription& device_info,
HloCostAnalysis::ShapeSizeFunction shape_size_function)
: device_info_(device_info), shape_size_function_(shape_size_function) {}
absl::string_view name() const override { return "multi_output_fusion"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
bool FuseSiblings(HloInstruction* parent, FusionInfoCache* fusion_info_cache,
GpuHloCostAnalysis* cost_analysis);
absl::StatusOr<bool> DoMultiOutputFusion();
void RecomputeReachability();
void DumpFusionState(const HloInstruction& consumer, absl::string_view label,
const HloInstruction* producer = nullptr);
HloComputation* computation_;
std::unique_ptr<HloDfsReachability> reachability_;
se::DeviceDescription device_info_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
};
}
}
#endif
#include "xla/service/gpu/multi_output_fusion.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_dfs_reachability.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
bool IsProfitableOperand(HloInstruction* instr) {
return !ShapeUtil::IsEffectiveScalar(instr->shape());
}
const HloSliceInstruction* FindUniqueSlice(const HloInstruction* parent,
const HloInstruction* instr) {
if (const auto* slice = DynCast<HloSliceInstruction>(instr)) {
return slice;
} else if (const auto* fusion = DynCast<HloFusionInstruction>(instr)) {
const HloSliceInstruction* result = nullptr;
for (size_t i = 0; i < fusion->operand_count(); ++i) {
if (fusion->operand(i) == parent) {
if (result) return nullptr;
auto* called_param = fusion->fused_parameter(i);
if (called_param->user_count() != 1) return nullptr;
result = FindUniqueSlice(called_param, called_param->users()[0]);
if (!result) return nullptr;
}
}
return result;
} else {
return nullptr;
}
}
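// Two sibling consumers that each read a single, mutually non-overlapping
// slice of a large (>= 1 KiB) operand would not save any memory traffic by
// being fused together, so that is reported as a reason not to fuse.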
FusionDecision ParameterSlicesAreNonOverlapping(const HloInstruction& instr1,
const HloInstruction& instr2,
const HloInstruction* parent) {
if (parent->shape().IsTuple()) return {};
if (ShapeUtil::ByteSizeOfElements(parent->shape()) < 1024) return {};
const HloSliceInstruction* slice1 = FindUniqueSlice(parent, &instr1);
const HloSliceInstruction* slice2 = FindUniqueSlice(parent, &instr2);
if (!slice1 || !slice2) return {};
auto& starts1 = slice1->slice_starts();
auto& starts2 = slice2->slice_starts();
auto& limits1 = slice1->slice_limits();
auto& limits2 = slice2->slice_limits();
for (int64_t dim = 0; dim < parent->shape().rank(); ++dim) {
bool overlap = starts1[dim] < limits2[dim] && starts2[dim] < limits1[dim];
if (!overlap) {
return "slices are non-overlapping";
}
}
return {};
}
FusionDecision LegalToFuse(const HloInstruction& instr1,
const HloInstruction& instr2,
const se::DeviceDescription& device_info,
FusionInfoCache* fusion_info_cache) {
CHECK(instr1.opcode() == HloOpcode::kFusion);
if (instr1.fused_expression_root()->opcode() ==
HloOpcode::kDynamicUpdateSlice ||
(instr2.opcode() == HloOpcode::kFusion &&
instr2.fused_expression_root()->opcode() ==
HloOpcode::kDynamicUpdateSlice)) {
return "can't fuse multiple DUSs";
}
return FusionFitsInBudget(instr1, instr2, device_info,
false,
fusion_info_cache);
}
int FusionPriority(const HloInstruction* instr) {
if (instr->IsMultiOutputFusion()) {
return 2;
}
if (instr->opcode() == HloOpcode::kFusion) {
return 1;
}
return 0;
}
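// Prefer consumers that are already multi-output fusions, then ordinary
// fusions, and only then unfused instructions.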
HloInstruction* SelectPreferredFusionCandidate(
    const std::vector<HloInstruction*>& candidates) {
if (candidates.empty()) {
return nullptr;
}
return *std::max_element(
candidates.begin(), candidates.end(),
[](const HloInstruction* a, const HloInstruction* b) {
return FusionPriority(a) < FusionPriority(b);
});
}
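// Fusing `producer` into `consumer` must not create a cycle: no other operand
// of the consumer may be reachable from the producer.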
FusionDecision OperandReachableFromProducer(
const HloInstruction& producer, const HloInstruction& consumer,
const HloDfsReachability& reachability) {
for (const auto* operand : consumer.operands()) {
if (!reachability.IsPresent(operand) &&
operand->opcode() == HloOpcode::kGetTupleElement) {
operand = operand->operand(0);
}
CHECK(reachability.IsPresent(operand) && reachability.IsPresent(&producer))
<< "Reachability map is incomplete. This should never "
"happen.";
if (&producer != operand && reachability.IsReachable(&producer, operand)) {
return {
absl::StrCat(producer.name(), " would introduce a cycle when fused")};
}
}
return {};
}
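// A consumer is a viable producer-consumer multi-output fusion candidate only
// if it can act as a multi-output fusion root, its shape is compatible with
// the producer's, fusion introduces no cycle, the result fits the fusion
// budget, and the performance model predicts no slowdown.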
FusionDecision ProducerCandidateIsFusible(
const HloInstruction& producer, const HloInstruction& consumer,
const HloDfsReachability& reachability, FusionInfoCache* fusion_info_cache,
GpuHloCostAnalysis* cost_analysis) {
if (!IsFusibleAsMultiOutputFusionRoot(consumer)) {
return "consumer not eligible as multi-output fusion root.";
}
RETURN_IF_NOT_FUSIBLE(
ShapesCompatibleForMultiOutputFusion(consumer, producer));
RETURN_IF_NOT_FUSIBLE(
OperandReachableFromProducer(producer, consumer, reachability));
RETURN_IF_NOT_FUSIBLE(FusionFitsInBudget(
producer, consumer, *cost_analysis->device_info_,
false, fusion_info_cache));
if (cost_analysis->ProducerConsumerMergedTooLarge(producer, consumer)) {
return "will generate too large IR";
}
GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes(
&producer, cost_analysis, GpuPerformanceModelOptions::Default(),
{&consumer},
true);
if (t.time_fused > t.time_unfused) {
return "will execute slower if fused";
}
return {};
}
std::vector<HloInstruction*> GetProducerConsumerMultiOutputFusionCandidates(
const HloInstruction* producer, const HloDfsReachability& reachability,
FusionInfoCache* fusion_info_cache, GpuHloCostAnalysis* cost_analysis) {
std::vector<HloInstruction*> fusion_candidates;
const HloComputation* computation = producer->parent();
const HloModule* module = computation->parent();
bool dump_fusion =
module->config().debug_options().xla_dump_fusion_visualization();
if (!IsProducerMultiOutputFusible(*producer)) {
return fusion_candidates;
}
if (producer->user_count() == 1 &&
!producer->users()[0]->IsMultiOutputFusion()) {
return fusion_candidates;
}
for (HloInstruction* consumer : producer->users()) {
VLOG(3) << "Looking at producer " << producer->name()
<< " and its consumer " << consumer->name();
if (auto decision =
ProducerCandidateIsFusible(*producer, *consumer, reachability,
fusion_info_cache, cost_analysis)) {
fusion_candidates.push_back(consumer);
} else if (dump_fusion) {
RegisterFusionState(
*computation,
absl::StrCat("Not considering fusion of producer |", producer->name(),
"| into consumer |", consumer->name(),
"| due to: ", decision.Explain()),
*consumer, producer);
}
}
return fusion_candidates;
}
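// Sibling candidates must have users, be usable as multi-output fusion roots,
// and not be nestable variadic reductions; an existing multi-output fusion
// qualifies only if all of its users are get-tuple-element instructions.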
bool IsSiblingFusionCandidate(const HloInstruction* instr) {
if (instr->users().empty() || !IsFusibleAsMultiOutputFusionRoot(*instr) ||
IsNestableVariadicReduction(*instr)) {
return false;
}
return (!instr->IsMultiOutputFusion() ||
absl::c_all_of(instr->users(), [&](const HloInstruction* user) {
return user->opcode() == HloOpcode::kGetTupleElement;
}));
}
FusionDecision CanFuseSiblings(const HloInstruction& sibling_consumer_1,
const HloInstruction& sibling_consumer_2,
const HloInstruction& common_producer,
const HloDfsReachability& reachability,
FusionInfoCache* fusion_info_cache,
GpuHloCostAnalysis* cost_analysis) {
if (reachability.IsConnected(&sibling_consumer_1, &sibling_consumer_2)) {
return {absl::StrCat(sibling_consumer_1.name(), " and ",
sibling_consumer_2.name(), " are connected")};
}
RETURN_IF_NOT_FUSIBLE(ShapesCompatibleForMultiOutputFusion(
sibling_consumer_1, sibling_consumer_2));
RETURN_IF_NOT_FUSIBLE(ParameterSlicesAreNonOverlapping(
sibling_consumer_1, sibling_consumer_2, &common_producer));
RETURN_IF_NOT_FUSIBLE(LegalToFuse(sibling_consumer_1, sibling_consumer_2,
*cost_analysis->device_info_,
fusion_info_cache));
return {};
}
}
void GpuMultiOutputFusion::RecomputeReachability() {
reachability_ = HloDfsReachability::Build(computation_);
}
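// Tries to merge pairs of `parent`'s users into multi-output fusions,
// visiting candidates in decreasing fusion priority and rebuilding the
// reachability map after every successful merge so later cycle checks remain
// valid.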
bool GpuMultiOutputFusion::FuseSiblings(HloInstruction* parent,
FusionInfoCache* fusion_info_cache,
GpuHloCostAnalysis* cost_analysis) {
const HloComputation* computation = parent->parent();
const HloModule* module = computation->parent();
bool dump_fusion =
module->config().debug_options().xla_dump_fusion_visualization();
if (!IsProfitableOperand(parent)) {
VLOG(3) << "Operand " << parent->ToShortString() << " is not profitable";
return false;
}
bool changed = false;
std::vector<HloInstruction*> siblings;
absl::c_copy_if(parent->users(), std::back_inserter(siblings),
IsSiblingFusionCandidate);
absl::c_stable_sort(siblings,
[](const HloInstruction* a, const HloInstruction* b) {
return FusionPriority(a) > FusionPriority(b);
});
for (auto i = siblings.begin(); i != siblings.end(); ++i) {
VLOG(3) << "Considering " << (*i)->name();
if ((*i)->opcode() != HloOpcode::kFusion) {
continue;
}
for (auto j = i + 1; j != siblings.end();) {
VLOG(3) << "Considering " << (*i)->name() << " and " << (*j)->name();
if (auto fusible = CanFuseSiblings(**i, **j, *parent, *reachability_,
fusion_info_cache, cost_analysis);
!fusible) {
if (dump_fusion) {
RegisterFusionState(
*computation,
absl::StrCat("Not fusing siblings |", (**i).name(), "| and |",
(**j).name(), "| due to: ", fusible.Explain()),
**i,
parent);
}
++j;
continue;
}
if (!ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing siblings %s and %s.",
(*i)->name(), (*j)->name());
})) {
++j;
continue;
}
VLOG(2) << "Fuse siblings " << (*i)->name() << " and " << (*j)->name();
fusion_info_cache->Invalidate(*i);
fusion_info_cache->Invalidate(*j);
HloInstruction* remaining = *i;
HloInstruction* fused = *j;
TF_CHECK_OK(cost_analysis->RemoveInstruction(remaining));
TF_CHECK_OK(cost_analysis->RemoveInstruction(fused));
DumpFusionState(*remaining,
absl::StrCat("About to fuse sibling |", fused->name(),
"| into sibling |", remaining->name(),
"| inside multi-output fusion"),
fused);
if (fused->opcode() == HloOpcode::kFusion) {
remaining->MergeFusionInstructionIntoMultiOutput(fused);
if (fused->IsInputFusion()) {
remaining->set_fusion_kind(HloInstruction::FusionKind::kInput);
}
} else {
remaining->FuseInstructionIntoMultiOutput(fused);
CHECK_EQ(0, fused->user_count());
TF_CHECK_OK(computation_->RemoveInstruction(fused));
}
DumpFusionState(*remaining,
absl::StrCat("Fused into |", remaining->name(),
"| inside multi-output fusion"));
TF_CHECK_OK(cost_analysis->RevisitInstruction(remaining));
changed = true;
siblings.erase(j);
RecomputeReachability();
}
}
return changed;
}
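// Processes the computation from uses to defs: for each producer, first
// attempts sibling fusion among its users, then tries to fuse the producer
// into a single preferred consumer, keeping the cost analysis and
// reachability map up to date throughout.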
absl::StatusOr<bool> GpuMultiOutputFusion::DoMultiOutputFusion() {
bool changed = false;
RecomputeReachability();
GpuHloCostAnalysis cost_analysis({shape_size_function_,
{},
true},
&device_info_);
TF_RETURN_IF_ERROR(computation_->Accept(&cost_analysis));
std::vector<HloInstruction*> defs_before_uses =
computation_->MakeInstructionPostOrder();
FusionInfoCache fusion_info_cache;
for (auto it = defs_before_uses.rbegin(); it != defs_before_uses.rend();
++it) {
auto* producer = *it;
if (producer->opcode() == HloOpcode::kConstant) {
VLOG(3) << producer->name() << " is a constant.";
continue;
}
if (producer->IsCustomFusion()) {
continue;
}
if (FuseSiblings(producer, &fusion_info_cache, &cost_analysis)) {
changed = true;
}
const auto candidates = GetProducerConsumerMultiOutputFusionCandidates(
producer, *reachability_, &fusion_info_cache, &cost_analysis);
auto* consumer_for_fusion = SelectPreferredFusionCandidate(candidates);
if (consumer_for_fusion == nullptr) {
continue;
}
if (!ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing %s and %s.", producer->name(),
consumer_for_fusion->name());
})) {
continue;
}
changed = true;
fusion_info_cache.Invalidate(producer);
fusion_info_cache.Invalidate(consumer_for_fusion);
TF_RETURN_IF_ERROR(cost_analysis.RemoveInstruction(producer));
TF_RETURN_IF_ERROR(cost_analysis.RemoveInstruction(consumer_for_fusion));
HloInstruction* input_fusion;
if (consumer_for_fusion->opcode() == HloOpcode::kFusion) {
input_fusion = consumer_for_fusion;
VLOG(2) << "Fuse producer " << producer->name() << " into its consumer "
<< consumer_for_fusion->name();
} else {
input_fusion = computation_->AddInstruction(HloInstruction::CreateFusion(
consumer_for_fusion->shape(),
ChooseFusionKind(*producer, *consumer_for_fusion),
consumer_for_fusion));
VLOG(2) << "Fuse producer " << producer->name() << " and its consumer "
<< consumer_for_fusion->name() << " into "
<< input_fusion->name();
TF_CHECK_OK(
computation_->ReplaceInstruction(consumer_for_fusion, input_fusion));
}
DumpFusionState(*input_fusion,
absl::StrCat("About to fuse producer |", producer->name(),
"| into consumer |", input_fusion->name(),
"| inside multi-output fusion"),
producer);
if (producer->opcode() == HloOpcode::kFusion) {
input_fusion->MergeFusionInstructionIntoMultiOutput(producer);
} else {
input_fusion->FuseInstructionIntoMultiOutput(producer);
CHECK_EQ(0, producer->user_count());
TF_CHECK_OK(computation_->RemoveInstruction(producer));
}
TF_RETURN_IF_ERROR(cost_analysis.RevisitInstruction(input_fusion));
DumpFusionState(*input_fusion,
absl::StrCat("Fused into |", input_fusion->name(),
"| inside multi-output fusion"));
RecomputeReachability();
}
return changed;
}
void GpuMultiOutputFusion::DumpFusionState(const HloInstruction& consumer,
absl::string_view label,
const HloInstruction* producer) {
if (consumer.GetModule()
->config()
.debug_options()
.xla_dump_fusion_visualization()) {
RegisterFusionState(*computation_, label, consumer, producer);
}
}
absl::StatusOr<bool> GpuMultiOutputFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* computation : GetFusibleComputations(*module, execution_threads)) {
computation_ = computation;
TF_ASSIGN_OR_RETURN(bool computation_changed, DoMultiOutputFusion());
changed |= computation_changed;
}
return changed;
}
}
} | #include "xla/service/gpu/multi_output_fusion.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace m = ::xla::match;
class MultiOutputFusionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
GpuMultiOutputFusion mof_{
TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()};
void CheckGpuMultiOutputFusion(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo,
GpuMultiOutputFusion{
TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()},
expected);
}
};
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
scalar_mul_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT mul.1 = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
})";
static int64_t CountMultiOutputFusions(const HloModule* module) {
int multi_output_fusion_count = 0;
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instr : computation->instructions()) {
if (instr->IsMultiOutputFusion()) {
multi_output_fusion_count++;
}
}
}
return multi_output_fusion_count;
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] constant(1)
fusion = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation
reduce.2 = f32[512]{0} reduce(p1, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(fusion, reduce.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceInputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[6400]{0} parameter(1)
mul = f32[6400]{0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[6400]{0} parameter(1)
r1 = f32[64,100]{0,1} reshape(p1.2)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[] reduce(r1, const.2), dimensions={1,0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[6400]{0} parameter(1)
fusion.1 = f32[] fusion(p0, p1), kind=kInput, calls=fused_computation_1
fusion.2 = f32[] fusion(p0, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[], f32[]) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, ReduceMofDifferentTypes) {
const char* hlo = R"(
HloModule module
scalar_add_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT add.1 = f32[] add(scalar_lhs.1, scalar_rhs.1)
}
scalar_add_computation_f16 {
scalar_lhs.0 = f16[] parameter(0)
scalar_rhs.0 = f16[] parameter(1)
ROOT add.0 = f16[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation {
param_0.2 = f32[128,512,28,28]{3,2,1,0} parameter(0)
c.1 = f16[128,512,28,28]{3,2,1,0} convert(param_0.2)
const.0 = f16[] constant(0)
ROOT reduce.0 = f16[512]{0} reduce(c.1, const.0), dimensions={0,2,3}, to_apply=scalar_add_computation_f16
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] constant(0)
reduce.1 = f32[512]{0} reduce(p1, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
fusion = f16[512]{0} fusion(p1), kind=kInput, calls=fused_computation
ROOT root = (f32[512]{0}, f16[512]{0}) tuple(reduce.1, fusion)
})";
CheckGpuMultiOutputFusion(hlo, R"(
)");
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceOutputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[10,10]{1,0} parameter(1)
mul = f32[10,10]{1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0,1}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[10,10]{1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[10]{0} reduce(p1.2, const.2), dimensions={0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1.3 = f32[10,10]{1,0} parameter(1)
fusion.1 = f32[] fusion(p0, p1.3), kind=kInput, calls=fused_computation_1
p2 = f32[] parameter(2)
fusion.2 = f32[10]{0} fusion(p2, p1.3), kind=kInput, calls=fused_computation_2
ROOT root = (f32[], f32[10]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[512]{0} reduce(p1.2, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
fusion.1 = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation_1
fusion.2 = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionNoSiblingFusionForCommonScalar) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
param_0.87 = bf16[32,4096,16384]{2,1,0} parameter(0)
param_1.4620 = s32[] parameter(1)
constant_3949 = s32[] constant(0)
compare.1026 = pred[] compare(param_1.4620, constant_3949), direction=LT
constant_5437 = s32[] constant(32)
add.6859 = s32[] add(param_1.4620, constant_5437)
select.1599 = s32[] select(compare.1026, add.6859, param_1.4620)
dynamic-slice.59 = bf16[1,4096,16384]{2,1,0} dynamic-slice(param_0.87, select.1599, constant_3949, constant_3949), dynamic_slice_sizes={1,4096,16384}
ROOT bitcast.41089 = bf16[4096,16384]{1,0} bitcast(dynamic-slice.59)
}
fused_computation_2 {
param_0 = bf16[32,4096,16384]{2,1,0} parameter(0)
param_1 = s32[] parameter(1)
constant = s32[] constant(0)
compare = pred[] compare(param_1, constant), direction=LT
constant.32 = s32[] constant(32)
add = s32[] add(param_1, constant.32)
select = s32[] select(compare, add, param_1)
dynamic-slice = bf16[1,4096,16384]{2,1,0} dynamic-slice(param_0, select, constant, constant), dynamic_slice_sizes={1,4096,16384}
ROOT bitcast.41087 = bf16[4096,16384]{1,0} bitcast(dynamic-slice)
}
ENTRY entry {
p0 = s32[] parameter(0)
p1 = bf16[32,4096,16384]{2,1,0} parameter(1)
p2 = bf16[32,4096,16384]{2,1,0} parameter(2)
fusion.1 = bf16[4096,16384]{1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation_1
fusion.2 = bf16[4096,16384]{1,0} fusion(p2, p0), kind=kLoop, calls=fused_computation_2
ROOT root = (bf16[4096,16384]{1,0}, bf16[4096,16384]{1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingReduceAndReduceMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation (p0: f32[128,512,28,28]) -> (f32[512], f32[512]) {
const.1 = f32[] constant(1)
p0.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(f32[128,512,28,28]{3,2,1,0} p0.1, f32[128,512,28,28]{3,2,1,0} p0.1)
reduce.1 = f32[512]{0} reduce(f32[128,512,28,28]{3,2,1,0} mul, f32[] const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
reduce.2 = f32[512]{0} reduce(f32[128,512,28,28]{3,2,1,0} p0.1, f32[] const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT tuple = (f32[512]{0}, f32[512]{0}) tuple(f32[512]{0} reduce.1, f32[512]{0} reduce.2)
}
ENTRY entry (p0: f32[128,512,28,28]) -> (f32[512], f32[512], f32[512]) {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
const = f32[] constant(1)
fusion = (f32[512]{0}, f32[512]{0}) fusion(f32[128,512,28,28]{3,2,1,0} p0), kind=kInput, calls=fused_computation
get-tuple-element = f32[512]{0} get-tuple-element((f32[512]{0}, f32[512]{0}) fusion), index=0
get-tuple-element.1 = f32[512]{0} get-tuple-element((f32[512]{0}, f32[512]{0}) fusion), index=1
reduce.3 = f32[512]{0} reduce(p0, const), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT root = (f32[512]{0}, f32[512]{0}, f32[512]{0}) tuple(f32[512]{0} get-tuple-element, f32[512]{0} get-tuple-element.1, f32[512]{0} reduce.3)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingFusionCheckAgainstReduceOperand) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[10,10]{1,0} parameter(1)
mul = f32[10,10]{1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
reduce.1 = f32[] reduce(p1.1, const.1), dimensions={0,1}, to_apply=scalar_add_computation
ROOT tuple = (f32[10,10], f32[]) tuple(mul, reduce.1)
}
fused_computation_2 {
p1.2 = f32[10,10]{1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[10] reduce(p1.2, const.2), dimensions={0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[10,10]{1,0} parameter(1)
p2 = f32[] parameter(2)
fusion.1 = (f32[10,10], f32[]) fusion(p0, p1), kind=kInput, calls=fused_computation_1
get-tuple-element.1 = f32[10,10] get-tuple-element((f32[10,10], f32[]) fusion.1), index=0
get-tuple-element.2 = f32[] get-tuple-element((f32[10,10], f32[]) fusion.1), index=1
fusion.2 = f32[10] fusion(p2, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[10,10], f32[], f32[10]) tuple(get-tuple-element.1, get-tuple-element.2, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, LoopVariadicReductionFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.94 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax_func.1536 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=fused_computation.94
}
fused_computation {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
fused_computation2 {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(inf)
tmp_3 = s32[] constant(1)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation
tmp_2 = s32[554112]{0} get-tuple-element(tmp_1), index=1
tmp_4 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation2
tmp_5 = s32[554112]{0} get-tuple-element(tmp_4), index=1
ROOT tmp_6 = s32[554112]{0} add(tmp_2, tmp_5)
})"))
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, InputVariadicReductionFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.1117 {
param_0.2433 = f32[] parameter(0)
param_1.2571 = f32[] parameter(1)
compare.1770 = pred[] compare(param_0.2433, param_1.2571), direction=LE
select.682 = f32[] select(compare.1770, param_0.2433, param_1.2571)
compare.1303.clone.1 = pred[] compare(param_0.2433, param_1.2571), direction=EQ
param_2.6460 = s32[] parameter(2)
param_3.6755 = s32[] parameter(3)
minimum.633.clone.1 = s32[] minimum(param_2.6460, param_3.6755)
select.398.clone.1 = s32[] select(compare.1770, param_2.6460, param_3.6755)
select.397.clone.1 = s32[] select(compare.1303.clone.1, minimum.633.clone.1, select.398.clone.1)
ROOT tuple.151 = (f32[], s32[]) tuple(select.682, select.397.clone.1)
}
minmax_func.223 {
lhs_value.224 = f32[] parameter(0)
rhs_value.226 = f32[] parameter(2)
lhs_index.225 = s32[] parameter(1)
rhs_index.227 = s32[] parameter(3)
ROOT fusion.1117 = (f32[], s32[]) fusion(lhs_value.224, rhs_value.226, lhs_index.225, rhs_index.227), kind=kLoop, calls=fused_computation.1117
}
fused_computation.73 {
bitcast.86661 = f32[3,1024,300]{2,1,0} parameter(0)
iota.734 = s32[3,1,1024,300]{3,2,1,0} iota(), iota_dimension=3
bitcast.97555 = s32[3,1024,300]{2,1,0} bitcast(iota.734)
constant_3917 = f32[] constant(inf)
constant_3918 = s32[] constant(0)
ROOT reduce.1069 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) reduce(bitcast.86661, bitcast.97555, constant_3917, constant_3918), dimensions={2}, to_apply=minmax_func.223
}
fused_computation.84 {
bitcast.86676 = f32[3,1024,300]{2,1,0} parameter(0)
iota.732 = s32[3,1,1024,300]{3,2,1,0} iota(), iota_dimension=3
bitcast.97553 = s32[3,1024,300]{2,1,0} bitcast(iota.732)
constant_3915 = f32[] constant(inf)
constant_3916 = s32[] constant(0)
ROOT reduce.1070 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) reduce(bitcast.86676, bitcast.97553, constant_3915, constant_3916), dimensions={2}, to_apply=minmax_func.223
}
ENTRY e {
p0 = f32[3,1024,300]{2,1,0} parameter(0)
fusion.84 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) fusion(p0), kind=kInput, calls=fused_computation.84
gte.391 = s32[3,1024]{1,0} get-tuple-element(fusion.84), index=1
fusion.73 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) fusion(p0), kind=kInput, calls=fused_computation.73
gte.393 = s32[3,1024]{1,0} get-tuple-element(fusion.73), index=1
ROOT r = s32[3,1024]{1,0} add(gte.391, gte.393)
})"))
.value();
EXPECT_TRUE(mof_.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->parameter_instruction(0)->user_count(),
1);
const HloInstruction* fusion =
module->entry_computation()->parameter_instruction(0)->users()[0];
EXPECT_THAT(fusion, GmockMatch(m::Fusion()));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionTwoLoops) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
ROOT div = f32[6400]{0} divide(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Divide())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionLoopElementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
div = f32[6400]{0} divide(p0, broadcast)
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Divide())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopsDifferentShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
ROOT mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
ROOT reduce = f32[1,5,1,2]{3,2,1,0} reduce(p0.2, const.2), dimensions={0,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
fusion.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[1,5,1,2]{3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[8,1,5,16,1,2]{5,4,3,2,1,0}, f32[1,5,1,2]{3,2,1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopAndMultiOutputLoop) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2),
dimensions={}
ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0})
tuple(gte0, gte1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Exp(), m::Add())));
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingMultiOutputLoopAndMultiOutputLoop) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,16]{1,0} parameter(0)
mul = f32[8,16]{1,0} multiply(p0.1, p0.1)
exp = f32[8,16]{1,0} exponential(p0.1)
ROOT tuple = (f32[8,16]{1,0}, f32[8,16]{1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,16]{1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,16]{1,0} broadcast(const.2),
dimensions={}
add = f32[8,16]{1,0} add(p0.2, broadcast)
ROOT tuple.1 = (f32[8,16]{1,0}, f32[8,16]{1,0}) tuple(add, broadcast)
}
ENTRY entry {
p0 = f32[8,16]{1,0} parameter(0)
fusion.1 = (f32[8,16]{1,0}, f32[8,16]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = (f32[8,16]{1,0}, f32[8,16]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,16]{1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,16]{1,0} get-tuple-element(fusion.1), index=1
gte2 = f32[8,16]{1,0} get-tuple-element(fusion.2), index=0
gte3 = f32[8,16]{1,0} get-tuple-element(fusion.2), index=1
ROOT root = (f32[8,16]{1,0}, f32[8,16]{1,0}, f32[8,16]{1,0},
f32[8,16]{1,0})
tuple(gte0, gte1, gte2, gte3)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Exp(), m::Add(), m::Broadcast())));
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingLoopAndMultiOutputLoopDifferentShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,2]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
ROOT reduce = f32[1,5,1,2]{3,2,1,0} reduce(p0.2, const.2),
dimensions={0,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = f32[1,5,1,2]{3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}, f32[1,5,1,2]{3,2,1,0})
tuple(gte0, gte1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, SiblingFusionBitcastAndLoopFusionNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation_1 {
p0.1 = f32[2048,16000]{1,0} parameter(0)
bitcast = f32[2048,1,16000]{2,1,0} bitcast(p0.1)
ROOT exp = f32[2048,1,16000]{2,1,0} exponential(bitcast)
}
ENTRY main {
param_0 = f32[2048,16000]{1,0} parameter(0)
fusion = f32[2048,1,16000]{2,1,0} fusion(param_0), kind=kLoop, calls=fused_computation_1
bitcast = f32[16000,1,2048]{2,1,0} bitcast(param_0)
ROOT tuple.143 = (f32[16000,1,2048]{2,1,0}, f32[2048,1,16000]{2,1,0}) tuple(bitcast, fusion)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest,
ProducerConsumerFusionBitcastAndElementwiseNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY main {
param_0 = f32[2048,16000]{1,0} parameter(0)
convert = bf16[2048,16000]{1,0} convert(param_0)
bitcast = bf16[16000,1,2048]{2,1,0} bitcast(convert)
ROOT tuple.143 = (bf16[16000,1,2048]{2,1,0}, bf16[2048,16000]{1,0}) tuple(bitcast, convert)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionElementwiseAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[32,32,32]{2,1,0} exponential(p0)
reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},
to_apply=scalar_add_computation
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Exp())));
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
add = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add
reduce = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, add)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Add())));
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] |
1,895 | cpp | tensorflow/tensorflow | all_gather_broadcast_reorder | third_party/xla/xla/service/all_gather_broadcast_reorder.cc | third_party/xla/xla/service/all_gather_broadcast_reorder_test.cc | #ifndef XLA_SERVICE_ALL_GATHER_BROADCAST_REORDER_H_
#define XLA_SERVICE_ALL_GATHER_BROADCAST_REORDER_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class AllGatherBroadcastReorder : public HloModulePass {
public:
absl::string_view name() const override { return "all-gather-bcast-reorder"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/all_gather_broadcast_reorder.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::StatusOr<bool> AllGatherBroadcastReorder::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
if (hlo_query::ContainsLayoutConstrainedCollective(*module,
HloOpcode::kAllGather)) {
VLOG(1) << "Skip AllGatherBroadcastReorder because the module contains "
"all-gather with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kAllGather || !inst->shape().IsArray() ||
inst->operand(0)->opcode() != HloOpcode::kBroadcast) {
continue;
}
HloAllGatherInstruction *ag = Cast<HloAllGatherInstruction>(inst);
HloBroadcastInstruction *bcast =
Cast<HloBroadcastInstruction>(inst->mutable_operand(0));
absl::flat_hash_set<int64_t> non_uniform_dims;
non_uniform_dims.insert(bcast->dimensions().begin(),
bcast->dimensions().end());
const bool all_gather_along_uniform_dim =
non_uniform_dims.insert(ag->all_gather_dimension()).second;
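// Compute the product of the dimensions that stay uniform under the
// broadcast; if it is 1, reordering cannot make the all-gather any smaller.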
int64_t uniform_dim_size = 1;
for (int64_t i = 0; i < ag->shape().rank(); ++i) {
if (non_uniform_dims.count(i) == 0) {
uniform_dim_size *= ag->shape().dimensions(i);
}
}
if (uniform_dim_size == 1) {
continue;
}
HloInstruction *replacement;
const int64_t ag_dim = ag->all_gather_dimension();
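// Case 1: the all-gather dimension maps to one of the broadcast operand's
// dimensions, so the all-gather can be applied directly to the (smaller)
// broadcast operand and the broadcast re-applied to the gathered result.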
if (!all_gather_along_uniform_dim) {
VLOG(2) << "All-gather along non uniform dimension";
auto ag_dim_index = PositionInContainer(bcast->dimensions(), ag_dim);
Shape new_ag_shape = bcast->operand(0)->shape();
new_ag_shape.set_dimensions(ag_dim_index,
ag->shape().dimensions(ag_dim));
auto *new_ag =
Cast<HloAllGatherInstruction>(computation->AddInstruction(
ag->CloneWithNewOperands(new_ag_shape, bcast->operands())));
if (ag->channel_id()) {
new_ag->set_channel_id(next_channel_id++);
}
new_ag->set_all_gather_dimension(ag_dim_index);
replacement = computation->AddInstruction(
bcast->CloneWithNewOperands(ag->shape(), {new_ag}));
} else {
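// Case 2: the all-gather dimension is uniform under the broadcast, so
// reshape the broadcast operand to add a leading unit dimension, all-gather
// along it, broadcast, and reshape back to the original all-gather shape.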
VLOG(2) << "All-gather along uniform dimension";
HloInstruction *x = bcast->mutable_operand(0);
std::vector<int64_t> shape_dims{1};
absl::Span<const int64_t> x_dims = x->shape().dimensions();
shape_dims.insert(shape_dims.end(), x_dims.begin(), x_dims.end());
Shape shape =
ShapeUtil::MakeShape(x->shape().element_type(), shape_dims);
HloInstruction *rs0 = computation->AddInstruction(
HloInstruction::CreateReshape(shape, x));
const int64_t ag_factor = ag->shape().dimensions(ag_dim) /
ag->operand(0)->shape().dimensions(ag_dim);
shape.set_dimensions(0, ag_factor);
auto *new_ag =
Cast<HloAllGatherInstruction>(computation->AddInstruction(
ag->CloneWithNewOperands(shape, {rs0})));
if (ag->channel_id()) {
new_ag->set_channel_id(next_channel_id++);
}
new_ag->set_all_gather_dimension(0);
std::vector<int64_t> bcast_shape_dims =
SpanToVector(ag->shape().dimensions());
bcast_shape_dims[ag_dim] = ag_factor;
bcast_shape_dims.insert(bcast_shape_dims.begin() + ag_dim + 1,
ag->shape().dimensions(ag_dim) / ag_factor);
Shape bcast_shape =
ShapeUtil::MakeShape(x->shape().element_type(), bcast_shape_dims);
std::vector<int64_t> bcast_dims;
bcast_dims.push_back(ag_dim);
for (int64_t d : bcast->dimensions()) {
bcast_dims.push_back(d + (d > ag_dim));
}
HloInstruction *bcast = computation->AddInstruction(
HloInstruction::CreateBroadcast(bcast_shape, new_ag, bcast_dims));
replacement = computation->AddInstruction(
HloInstruction::CreateReshape(ag->shape(), bcast));
}
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(replacement));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ag));
changed = true;
}
}
return changed;
}
} | #include "xla/service/all_gather_broadcast_reorder.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
class AllGatherBroadcastReorderTest : public HloTestBase {
public:
enum class PassOutput { NoChange, NonUniformAGPattern, UniformAGPattern };
void RunPass(absl::string_view hlo_module, PassOutput expected_output) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_module));
auto changed = AllGatherBroadcastReorder().Run(module.get());
ASSERT_TRUE(changed.ok());
if (expected_output == PassOutput::NoChange) {
EXPECT_FALSE(changed.value());
} else {
EXPECT_TRUE(changed.value());
if (expected_output == PassOutput::NonUniformAGPattern) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::Broadcast(m::AllGather(m::Parameter())));
} else {
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Reshape(m::Broadcast(m::AllGather(m::Reshape(m::Parameter())))));
}
}
}
};
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongNonUniformDim) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[128, 5] parameter(0)
bc = f32[5, 4, 8, 128] broadcast(x), dimensions={3, 0}
ROOT ag = f32[5, 4, 8, 256] all-gather(bc), dimensions={3}, replica_groups={{0, 1}}
}
)";
RunPass(hlo_string, PassOutput::NonUniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongUniformDim) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[128, 5] parameter(0)
bc = f32[5, 4, 8, 128] broadcast(x), dimensions={3, 0}
ROOT ag = f32[5, 12, 8, 128] all-gather(bc), dimensions={1}, replica_groups={{0, 1, 2}}
}
)";
RunPass(hlo_string, PassOutput::UniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherBroadcastScalar) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[] parameter(0)
bc = f32[4, 8] broadcast(x), dimensions={}
ROOT ag = f32[12, 8] all-gather(bc), dimensions={0}, replica_groups={{0, 1, 2}}
}
)";
RunPass(hlo_string, PassOutput::UniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, T5Test) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[128] parameter(0)
bc = f32[1,4,84,128]{3,2,1,0} broadcast(x), dimensions={3}
ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(bc), channel_id=6,
replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassOutput::UniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, FailedMatch) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[1,4,84,128] parameter(0)
ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(x), channel_id=6,
replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassOutput::NoChange);
}
}
} |
1,896 | cpp | tensorflow/tensorflow | defuser | third_party/xla/xla/service/defuser.cc | third_party/xla/xla/service/defuser_test.cc | #ifndef XLA_SERVICE_DEFUSER_H_
#define XLA_SERVICE_DEFUSER_H_
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class Defuser : public HloModulePass {
public:
Defuser() {}
~Defuser() override {}
absl::string_view name() const override { return "defuser"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/defuser.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
absl::StatusOr<bool> Defuser::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Defusing module " << module->name();
XLA_VLOG_LINES(2, "Before defusion:\n" + module->ToString());
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
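// Visit the call graph and defuse the unique caller of every fusion
// computation, replacing each fusion instruction with its fused
// instructions inlined into the parent computation.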
TF_RETURN_IF_ERROR(call_graph->VisitNodes(
[&](const CallGraphNode& call_graph_node) -> absl::Status {
if (call_graph_node.computation()->IsFusionComputation()) {
TF_RET_CHECK(call_graph_node.caller_callsites().size() == 1);
HloInstruction* fusion_instruction =
call_graph_node.caller_callsites()[0].instruction();
TF_RETURN_IF_ERROR(fusion_instruction->Defuse());
changed = true;
}
return absl::OkStatus();
},
true));
XLA_VLOG_LINES(2, "After defusion:\n" + module->ToString());
return changed;
}
} | #include "xla/service/defuser.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class DefuserTest : public HloTestBase {
protected:
int FusionCount(const HloModule* m) {
int count = 0;
for (HloComputation* computation : m->computations()) {
if (computation->IsFusionComputation()) {
count++;
}
}
return count;
}
Defuser defuser_;
const Shape shape_ = ShapeUtil::MakeShape(F32, {2, 2});
};
TEST_F(DefuserTest, NoFusionInstruction) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
m->AddEntryComputation(builder.Build());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_FALSE(defuser_.Run(m.get()).value());
}
TEST_F(DefuserTest, TrivialFusionInstructionAsRoot) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Parameter(), op::Parameter()));
}
TEST_F(DefuserTest, TrivialFusionInstructionNotAsRoot) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Negate(op::Fusion()));
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Negate(op::Add(op::Parameter(), op::Parameter())));
}
TEST_F(DefuserTest, NonTrivialFusionInstruction) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto param3 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction(
{add2, constant, div, mul, sub, negate, add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Constant(), op::Divide()));
}
TEST_F(DefuserTest, MultipleFusionInstructions) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto param3 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add2, constant, div, mul},
HloInstruction::FusionKind::kLoop);
computation->CreateFusionInstruction({sub, negate, add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(2, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Constant(), op::Divide()));
}
TEST_F(DefuserTest, NestedFusionInstructions) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto computation = m->AddEntryComputation(builder.Build());
auto outer_fusion = computation->CreateFusionInstruction(
{negate, add}, HloInstruction::FusionKind::kLoop);
HloInstruction* fused_negate = outer_fusion->fused_expression_root();
ASSERT_EQ(fused_negate->opcode(), HloOpcode::kNegate);
outer_fusion->fused_instructions_computation()->CreateFusionInstruction(
{fused_negate}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(2, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(), op::Negate(op::Add()));
}
}
} |
1,897 | cpp | tensorflow/tensorflow | all_reduce_splitter | third_party/xla/xla/service/gpu/transforms/all_reduce_splitter.cc | third_party/xla/xla/service/gpu/transforms/all_reduce_splitter_test.cc | #ifndef XLA_SERVICE_ALL_REDUCE_SPLITTER_H_
#define XLA_SERVICE_ALL_REDUCE_SPLITTER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class AllReduceSplitter : public HloModulePass {
public:
absl::string_view name() const override { return "all-reduce-splitter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/all_reduce_splitter.h"
#include <cstdint>
#include <optional>
#include <string>
#include <variant>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_opt_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
struct ARReplicaGroups {
std::vector<ReplicaGroup> first_ar_replica_groups;
std::vector<ReplicaGroup> second_ar_replica_groups;
};
struct AllReduceRewriteSpec {
int split_dim;
int group_size;
HloAllReduceInstruction* all_reduce;
HloDynamicSliceInstruction* dynamic_slice;
ARReplicaGroups replica_groups;
std::string ToString() {
return absl::Substitute(
"{\n split_dim=$0\n group_size=$1\n all_reduce=$2\n "
"dynamic_slice=$3\n}\n",
split_dim, group_size, all_reduce->ToString(),
dynamic_slice->ToString());
}
};
struct RewriteInfeasibleReason {
const HloInstruction* ar;
std::string message;
};
struct ReplicaGroups {
std::vector<ReplicaGroup> replica_groups;
template <typename H>
friend H AbslHashValue(H h, const ReplicaGroups& rg) {
return H::combine(std::move(h), rg.replica_groups.size());
}
friend bool operator==(const ReplicaGroups& item,
const ReplicaGroups& other) {
if (item.replica_groups.size() != other.replica_groups.size()) {
return false;
}
for (int i = 0; i < item.replica_groups.size(); i++) {
const ReplicaGroup& item_replica_group = item.replica_groups[i];
const ReplicaGroup& other_replica_group = other.replica_groups[i];
for (int i = 0; i < item_replica_group.replica_ids_size(); i++) {
if (item_replica_group.replica_ids(i) !=
other_replica_group.replica_ids(i)) {
return false;
}
}
}
return true;
}
};
using ARReplicaGroupMap =
absl::flat_hash_map<ReplicaGroups,
std::vector<const HloAllReduceInstruction*>>;
using RewriteDecision =
std::variant<AllReduceRewriteSpec, RewriteInfeasibleReason>;
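// Returns the single dimension along which the dynamic-slice shrinks the
// all-reduce result, or std::nullopt if zero or more than one dimension
// differs.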
std::optional<int> GetSplitDim(const HloAllReduceInstruction& ar,
const HloDynamicSliceInstruction& ds) {
int split_dim = -1;
int num_dims = 0;
for (int64_t dim = 0; dim < ar.shape().rank(); ++dim) {
if (ar.shape().dimensions(dim) != ds.shape().dimensions(dim)) {
num_dims++;
split_dim = dim;
}
}
if (num_dims != 1) {
VLOG(2) << "No support for multiple nor 0 split dims.";
return std::nullopt;
}
return split_dim;
}
std::optional<int> GetProcessGroupSize(const HloAllReduceInstruction& ar,
const HloDynamicSliceInstruction& ds) {
CHECK(ds.operand(0) == &ar) << "Irrelevant AR + DS pair.";
std::optional<int> split_dim = GetSplitDim(ar, ds);
if (!split_dim.has_value()) {
return std::nullopt;
}
return ar.shape().dimensions(*split_dim) /
ds.dynamic_slice_sizes()[*split_dim];
}
ARReplicaGroupMap GetReplicaGroupsMap(HloComputation& computation) {
ARReplicaGroupMap map;
hlo_query::ForEachInstructionWithOpcode(
computation, HloOpcode::kAllReduce,
[&map](const HloInstruction* instruction) {
const HloAllReduceInstruction* ar =
Cast<HloAllReduceInstruction>(instruction);
auto rgs = ReplicaGroups{ar->replica_groups()};
map[rgs].push_back(ar);
});
return map;
}
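// Builds the replica groups for the split: contiguous groups of `group_size`
// ranks for the first all-reduce, and strided groups that combine the
// partial results across those groups for the second all-reduce.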
ARReplicaGroups GetNewReplicaGroups(int group_size, int num_partitions) {
CHECK_EQ(num_partitions % group_size, 0);
std::vector<ReplicaGroup> first_ar_rgs, second_ar_rgs;
int num_units = num_partitions / group_size;
first_ar_rgs.reserve(num_units);
second_ar_rgs.reserve(group_size);
for (int u = 0; u < group_size * num_units; u += group_size) {
ReplicaGroup& group = first_ar_rgs.emplace_back();
for (int r = u; r < u + group_size; r++) {
group.add_replica_ids(r);
}
}
for (int g = 0; g < group_size; g++) {
ReplicaGroup& group = second_ar_rgs.emplace_back();
for (int r = g; r < group_size * num_units; r += group_size) {
group.add_replica_ids(r);
}
}
return {
first_ar_rgs,
second_ar_rgs,
};
}
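// Temporarily rewires the all-reduce + dynamic-slice pair onto an all-reduce
// with the proposed first replica groups and checks whether the pair matches
// a reduce-scatter; the temporary instruction is removed before returning.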
bool IsLogicalReduceScatter(const HloModule& module,
const AllReduceRewriteSpec& spec,
HloComputation& computation) {
HloAllReduceInstruction& ar = *spec.all_reduce;
CHECK_EQ(ar.user_count(), 1);
CHECK_EQ(module.config().replica_count(), 1);
HloInstruction* first_ar =
computation.AddInstruction(HloInstruction::CreateAllReduce(
ar.shape(), ar.operands(), ar.to_apply(),
CollectiveDeviceList(spec.replica_groups.first_ar_replica_groups),
ar.constrain_layout(), hlo_query::NextChannelId(module),
ar.use_global_device_ids()));
HloInstruction* ds = ar.users()[0];
auto* old_operand = ds->mutable_operand(0);
if (!ds->ReplaceOperandWith(0, first_ar).ok()) {
return false;
}
absl::Cleanup _ = [&] {
CHECK_OK(ds->ReplaceOperandWith(0, old_operand));
CHECK_OK(computation.RemoveInstruction(first_ar));
};
return MatchReduceScatter(Cast<HloAllReduceInstruction>(first_ar),
module.config().num_partitions(),
module.config().replica_count(),
false,
true)
.has_value();
}
bool IsProfitableToSplit(const ARReplicaGroupMap& replica_map,
const AllReduceRewriteSpec& spec) {
auto new_rgs = spec.replica_groups;
bool first_replica_exists =
replica_map.contains(ReplicaGroups{new_rgs.first_ar_replica_groups});
bool second_replica_exists =
replica_map.contains(ReplicaGroups{new_rgs.second_ar_replica_groups});
return first_replica_exists || second_replica_exists;
}
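// Validates all rewrite preconditions (SPMD partitioning, global device ids,
// a single dynamic-slice user, a group size that divides the partition
// count, a logical reduce-scatter pattern, and profitability) and returns
// either a rewrite spec or the reason the rewrite is infeasible.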
RewriteDecision CanRewrite(const HloModule& module,
const ARReplicaGroupMap& replica_map,
HloComputation& computation,
HloInstruction& instruction) {
const HloModuleConfig& config = module.config();
if (config.use_auto_spmd_partitioning() || !config.use_spmd_partitioning() ||
config.replica_count() != 1) {
return RewriteInfeasibleReason{
&instruction,
"Supporting only SPMD partitioning scheme.",
};
}
if (instruction.opcode() != HloOpcode::kAllReduce) {
return RewriteInfeasibleReason{
&instruction,
"Cannot rewrite an AllReduce, since it's not AllReduce.",
};
}
auto* ar = Cast<HloAllReduceInstruction>(&instruction);
if (!ar->use_global_device_ids()) {
return RewriteInfeasibleReason{
&instruction,
"Only global ids are supported currently.",
};
}
if (ar->user_count() != 1 ||
ar->users().front()->opcode() != HloOpcode::kDynamicSlice) {
return RewriteInfeasibleReason{
&instruction,
"Cannot rewrite AllReduce if it is not a logical reduce scatter.",
};
}
auto* ds = Cast<HloDynamicSliceInstruction>(ar->users().front());
if (ds->user_count() > 1) {
return RewriteInfeasibleReason{
&instruction,
"Exactly one user of dynamic slice is required for a rewrite.",
};
}
int num_partitions = config.num_partitions();
std::vector<ReplicaGroup> rgs = ar->replica_groups();
if (rgs.size() != 1 || rgs.front().replica_ids_size() != num_partitions) {
return RewriteInfeasibleReason{
&instruction,
absl::StrCat("Cannot determine a valid split with num_partitions: ",
num_partitions),
};
}
std::optional<int> split_dim = GetSplitDim(*ar, *ds);
if (!split_dim.has_value()) {
return RewriteInfeasibleReason{
&instruction,
"Cannot get a split dim.",
};
}
std::optional<int> group_size = GetProcessGroupSize(*ar, *ds);
if (!group_size.has_value()) {
return RewriteInfeasibleReason{
&instruction,
"Cannot determine a group size.",
};
}
if (num_partitions == group_size) {
return RewriteInfeasibleReason{
&instruction,
"Nothing to rewrite",
};
}
if (num_partitions % *group_size != 0) {
return RewriteInfeasibleReason{
&instruction,
"Group size does not evenly divide the number of partitions",
};
}
auto spec = AllReduceRewriteSpec{
*split_dim,
*group_size,
ar,
ds,
GetNewReplicaGroups(*group_size, num_partitions),
};
if (!IsLogicalReduceScatter(module, spec, computation)) {
return RewriteInfeasibleReason{
&instruction,
"Not a logical reduce scatter.",
};
}
if (!IsProfitableToSplit(replica_map, spec)) {
return RewriteInfeasibleReason{
&instruction,
"Splitting is not profitable.",
};
}
return spec;
}
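// Performs the split: the original all-reduce is replaced by an all-reduce
// over the first (smaller) replica groups, and a second all-reduce over the
// complementary groups is applied to the result of the dynamic-slice.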
absl::StatusOr<bool> SplitAllReduce(const HloModuleConfig& config,
AllReduceRewriteSpec spec,
HloComputation& computation) {
int64_t next_channel_id =
hlo_query::NextChannelId(*spec.all_reduce->GetModule());
VLOG(1) << "AR splitting spec: " << spec.ToString();
int num_partitions = config.num_partitions();
int group_size = spec.group_size;
CHECK_EQ(num_partitions % group_size, 0);
HloAllReduceInstruction& ar = *spec.all_reduce;
HloDynamicSliceInstruction& ds = *spec.dynamic_slice;
const auto& [first_ar_replica_groups, second_ar_replica_groups] =
spec.replica_groups;
int channel_id = next_channel_id++;
HloInstruction* first_ar =
computation.AddInstruction(HloInstruction::CreateAllReduce(
ar.shape(), ar.operands(), ar.to_apply(),
CollectiveDeviceList(first_ar_replica_groups), ar.constrain_layout(),
channel_id, ar.use_global_device_ids()));
channel_id = next_channel_id++;
HloInstruction* second_ar =
computation.AddInstruction(HloInstruction::CreateAllReduce(
ds.shape(), {&ds}, ar.to_apply(),
CollectiveDeviceList(second_ar_replica_groups), ar.constrain_layout(),
channel_id, ar.use_global_device_ids()));
TF_RETURN_IF_ERROR(computation.ReplaceInstruction(&ar, first_ar));
if (ds.IsRoot()) {
computation.set_root_instruction(second_ar);
}
TF_RETURN_IF_ERROR(ds.ReplaceAllUsesWith(second_ar));
return true;
}
absl::StatusOr<bool> SplitAllReduce(const HloModule& module,
const ARReplicaGroupMap& replica_map,
HloComputation& computation,
HloInstruction& instruction) {
RewriteDecision spec =
CanRewrite(module, replica_map, computation, instruction);
if (std::holds_alternative<RewriteInfeasibleReason>(spec)) {
auto reason = std::get<RewriteInfeasibleReason>(spec);
VLOG(1) << "Cannot process {" << reason.ar->ToString()
<< "} due to : " << reason.message;
return false;
}
return SplitAllReduce(module.config(), std::get<AllReduceRewriteSpec>(spec),
computation);
}
}
absl::StatusOr<bool> AllReduceSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* computation : module->computations(execution_threads)) {
ARReplicaGroupMap replica_map = GetReplicaGroupsMap(*computation);
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool rewritten, SplitAllReduce(*module, replica_map,
*computation, *instr));
changed |= rewritten;
}
}
return changed;
}
} | #include "xla/service/all_reduce_splitter.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_reduce_scatter_creator.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class AllReduceSplitterTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PrepareModule(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions) {
HloModuleConfig config = GetModuleConfigForTest(
num_replicas,
num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
return ParseAndReturnVerifiedModule(hlo_module, config);
}
size_t AllReduceCount(const HloModule &module) {
return CollectiveCount(module, HloOpcode::kAllReduce);
}
private:
size_t CollectiveCount(const HloModule &module, HloOpcode opcode) {
return absl::c_count_if(
module.entry_computation()->instructions(),
[&opcode](HloInstruction *instr) { return instr->opcode() == opcode; });
}
};
class AllReduceSplitterFilecheckTest : public AllReduceSplitterTest {
public:
absl::Status FileCheck(const std::string &hlo_text,
absl::string_view pattern) {
TF_ASSIGN_OR_RETURN(bool matched, RunFileCheck(hlo_text, pattern));
if (!matched) {
return absl::InternalError("Filecheck failed.");
}
return absl::OkStatus();
}
};
TEST_F(
AllReduceSplitterFilecheckTest,
MatchBasicPatternIfDynamicSliceIsRootAndThereExistsAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK: %[[AR0:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]}
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[AR0]], bf16[] %[[ZERO]])
CHECK: %[[AR1:.*]] = bf16[4096]{0} all-reduce(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-SAME: replica_groups={[[DESIRED_RGS]]}
CHECK: %[[DS:.*]] = bf16[1024]{0} dynamic-slice(bf16[4096]{0} %[[AR1]], s32[] %[[_:.*]])
CHECK-SAME: dynamic_slice_sizes={1024}
CHECK-NEXT: ROOT %[[AR2:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[DS]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
)"));
}
TEST_F(
AllReduceSplitterTest,
DoesNotMatchMatchBasicPatternIfDynamicSliceIsRootAndThereIsNoAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 1);
}
TEST_F(
AllReduceSplitterFilecheckTest,
MatchBasicPatternIfDynamicSliceIsNotRootAndThereExistsAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
zero = bf16[] constant(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0}
ROOT _ = tuple(broadcast, first.ar)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[P0]], bf16[] %[[ZERO]])
CHECK: %[[AR0:.*]] = bf16[4096]{0} all-reduce(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]}
CHECK: %[[DS:.*]] = bf16[1024]{0} dynamic-slice(bf16[4096]{0} %[[AR0]], s32[] %[[_:.*]])
CHECK-SAME: dynamic_slice_sizes={1024}
CHECK-NEXT: %[[AR1:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[DS]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
CHECK: %[[EXISTING_AR:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK-SAME: replica_groups={[[DESIRED_RGS]]}
CHECK: ROOT
CHECK-NOT: %[[AR1]]
CHECK-SAME: %[[EXISTING_AR]]
)"));
}
TEST_F(
AllReduceSplitterTest,
DoesNotMatchBasicPatternIfDynamicSliceIsNotRootAndThereIsNoAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
p.1 = bf16[2,4096,4096] parameter(1)
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0}
add = bf16[2,4096,4096] add(p,p.1)
ROOT _ = tuple(broadcast, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 1);
}
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfDynamicSliceIsFullySharded) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(512)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[512] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={512}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 2);
}
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfItIsNotCompiledWithSPMDPartitioning) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
HloModuleConfig config =
GetModuleConfigForTest(1, 8);
config.set_use_spmd_partitioning(false);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, config));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_THAT(AllReduceCount(*module), 2);
}
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfUseGlobalDeviceIdsIsFalse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 2);
}
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfIsNotCrossAllPartitionsAllReduce) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 2);
}
TEST_F(
AllReduceSplitterFilecheckTest,
PipelineMatchesBasicPatternWithDynamicSliceAsRootAndRewritesToReduceScatter) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
HloPassPipeline pipeline("all-reduce-splitter-rewrite");
pipeline.AddPass<AllReduceSplitter>();
pipeline.AddPass<ReduceScatterCreator>();
EXPECT_THAT(pipeline.Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK: %[[AR0:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]}
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[AR0]], bf16[] %[[ZERO]])
CHECK: %[[REDUCE_SCATTER:.*]] = bf16[1024]{0} reduce-scatter(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-SAME: replica_groups={[[DESIRED_RGS]]}
CHECK-NEXT: ROOT %[[AR2:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[REDUCE_SCATTER]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
)"));
}
TEST_F(
AllReduceSplitterFilecheckTest,
PipelineMatchesBasicPatternWithDynamicSliceNotAsRootAndRewritesToReduceScatter) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
zero = bf16[] constant(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0}
ROOT _ = tuple(broadcast, first.ar)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
HloPassPipeline pipeline("all-reduce-splitter-rewrite");
pipeline.AddPass<AllReduceSplitter>();
pipeline.AddPass<ReduceScatterCreator>();
EXPECT_THAT(pipeline.Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[P0]], bf16[] %[[ZERO]])
CHECK: %[[REDUCE_SCATTER:.*]] = bf16[1024]{0} reduce-scatter(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-NEXT: %[[AR1:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[REDUCE_SCATTER]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
CHECK: %[[EXISTING_AR:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK: ROOT
CHECK-NOT: %[[AR1]]
CHECK-SAME: %[[EXISTING_AR]]
)"));
}
}
}
} |
1,898 | cpp | tensorflow/tensorflow | bitcast_dtypes_expander | third_party/xla/xla/service/bitcast_dtypes_expander.cc | third_party/xla/xla/service/bitcast_dtypes_expander_test.cc | #ifndef XLA_SERVICE_BITCAST_DTYPES_EXPANDER_H_
#define XLA_SERVICE_BITCAST_DTYPES_EXPANDER_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
class BitcastDtypesExpander : public OpExpanderPass {
public:
absl::string_view name() const override { return "bitcast_dtypes_expander"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
private:
absl::flat_hash_map<std::string, HloComputation*> computation_cache_;
};
}
#endif
#include "xla/service/bitcast_dtypes_expander.h"
#include "absl/algorithm/container.h"
#include "absl/strings/str_join.h"
#include "xla/client/lib/arithmetic.h"
#include "xla/client/lib/broadcast.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
absl::StatusOr<HloInstruction*> BitcastDtypesExpander::ExpandInstruction(
HloInstruction* instruction) {
HloInstruction* input = instruction->mutable_operand(0);
const Shape& from_shape = input->shape();
const Shape& to_shape = instruction->shape();
int input_bit_width = primitive_util::BitWidth(from_shape.element_type());
int output_bit_width = primitive_util::BitWidth(to_shape.element_type());
PrimitiveType input_logical_type =
primitive_util::UnsignedIntegralTypeForBitWidth(input_bit_width);
PrimitiveType output_logical_type =
primitive_util::UnsignedIntegralTypeForBitWidth(output_bit_width);
if (input_bit_width == output_bit_width) {
return instruction;
}
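  // One expansion computation is built and cached per (from_shape, to_shape)
  // signature, so repeated bitcast-converts of the same shapes share it.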
std::string name =
absl::StrFormat("xla.bitcast_convert_%s_2_%s", from_shape.ToString(),
to_shape.ToString());
HloModule* module = instruction->GetModule();
HloComputation*& computation =
computation_cache_.emplace(name, nullptr).first->second;
if (!computation) {
XlaBuilder b(name);
XlaOp input = Parameter(&b, 0, instruction->operand(0)->shape(), "a");
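    // Wide-to-narrow case (e.g. s32 -> s8[...,4]): broadcast each wide element
    // along a new trailing dimension, then extract the narrow pieces with
    // logical right shifts and masking.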
if (input_bit_width > output_bit_width) {
std::vector<int64_t> broadcasted_input_shape(
from_shape.dimensions().begin(), from_shape.dimensions().end());
std::vector<int64_t> reshaped_input_shape(from_shape.dimensions().begin(),
from_shape.dimensions().end());
broadcasted_input_shape.push_back(input_bit_width / output_bit_width);
reshaped_input_shape.push_back(1);
int64_t output_bit_width_mask = (int64_t{1} << output_bit_width) - 1;
TF_ASSIGN_OR_RETURN(input,
BroadcastTo(Reshape(input, reshaped_input_shape),
broadcasted_input_shape));
input = BitcastConvertType(input, input_logical_type);
TF_ASSIGN_OR_RETURN(Shape input_shape, b.GetShape(input));
XlaOp iota = Iota(&b, input_shape, input_shape.dimensions_size() - 1);
XlaOp iota_m = Mul(ScalarLike(input, output_bit_width), iota);
input = And(ShiftRightLogical(input, iota_m),
ScalarLike(input, output_bit_width_mask));
input = ConvertElementType(input, output_logical_type);
} else if (input_bit_width < output_bit_width) {
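      // Narrow-to-wide case (e.g. s8[...,4] -> s32): reinterpret each narrow
      // element as unsigned, widen it, shift it into its bit position, and
      // OR-reduce along the last dimension to assemble the wide value.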
input = BitcastConvertType(input, input_logical_type);
input = ConvertElementType(input, output_logical_type);
XlaOp iota_m = Mul(
ConstantR0WithType(&b, output_logical_type, input_bit_width),
Iota(&b,
ShapeUtil::ChangeElementType(from_shape, output_logical_type),
from_shape.rank() - 1));
input = ShiftLeft(input, iota_m);
input = Reduce(input, Zero(&b, output_logical_type),
CreateScalarOrComputation(output_logical_type, &b),
{from_shape.rank() - 1});
}
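    // Reinterpret the assembled bits as the requested element type. This final
    // op becomes the root of the built computation; its XlaOp is intentionally
    // unused.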
BitcastConvertType(input, to_shape.element_type());
TF_ASSIGN_OR_RETURN(XlaComputation xla_computation, b.Build());
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
xla_computation.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(
xla_computation.proto(), config));
HloCloneContext context(module);
computation =
module->DeepCloneComputation(new_module->entry_computation(), &context);
}
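  // OpExpanderPass replaces the original bitcast-convert with this call into
  // the (possibly cached) expansion computation.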
return instruction->parent()->AddInstruction(HloInstruction::CreateCall(
instruction->shape(), instruction->operands(), computation));
}
bool BitcastDtypesExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kBitcastConvert &&
primitive_util::BitWidth(instruction->shape().element_type()) !=
primitive_util::BitWidth(
instruction->operand(0)->shape().element_type());
}
} | #include "xla/service/bitcast_dtypes_expander.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class BitcastDtypesExpanderTest : public HloTestBase {};
TEST_F(BitcastDtypesExpanderTest, S32toS8) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_smaller
ENTRY main {
p = s32[10] parameter(0)
ROOT out = s8[10,4] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, S64toS32) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_smaller
ENTRY main {
p = s64[10] parameter(0)
ROOT out = s32[10,2] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, S8toS32) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_larger
ENTRY main {
p = s8[10,4] parameter(0)
ROOT out = s32[10] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, RewriteInsideWhileTest) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const = s32[] constant(42)
converted_val2 = s8[4] bitcast-convert(val2)
converted_const = s8[4] bitcast-convert(const)
add = s8[4] add(converted_val2, converted_const)
out_add = s32[] bitcast-convert(add)
ROOT root = (f32[2], s32[]) tuple(val1, out_add)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
}
}
} |
1,899 | cpp | tensorflow/tensorflow | reshape_mover | third_party/xla/xla/service/reshape_mover.cc | third_party/xla/xla/service/reshape_mover_test.cc | #ifndef XLA_SERVICE_RESHAPE_MOVER_H_
#define XLA_SERVICE_RESHAPE_MOVER_H_
#include "xla/service/hlo_pass_interface.h"
namespace xla {
struct ReshapeMoverOptions {
bool reshape_of_1d_broadcast_is_cheap = false;
};
class ReshapeMover : public HloModulePass {
public:
explicit ReshapeMover(
const ReshapeMoverOptions& options = ReshapeMoverOptions{})
: options_(options) {}
absl::string_view name() const override { return "reshape-mover"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> TryReshapeMoveOnCandidates(
HloInstructionSet* candidates);
absl::StatusOr<bool> SinkRearrangeOperands(HloInstruction* instruction);
absl::StatusOr<HloInstruction*> ApplyInverseRearrange(
const HloInstruction* rearrange, HloInstruction* operand);
bool IsReshapeMoveCandidate(HloInstruction* instruction);
const HloInstruction* FirstNontrivialRearrange(
absl::Span<const HloInstruction* const> instrs);
bool CanTriviallyRearrange(const HloInstruction* instr,
const HloInstruction* rearrange);
ReshapeMoverOptions options_;
};
}
#endif
#include "xla/service/reshape_mover.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/permutation_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
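// A "rearrange" is a reshape or a transpose; the pass treats both uniformly
// when sinking them below elementwise ops.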
bool IsRearrange(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kReshape ||
instruction->opcode() == HloOpcode::kTranspose;
}
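// Two rearranges are equivalent if they are transposes with the same
// permutation, or reshapes whose operands (and results) have the same
// dimensions.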
bool AreEquivalentRearranges(const HloInstruction* a, const HloInstruction* b) {
if (a->opcode() != b->opcode() ||
!ShapeUtil::SameDimensions(a->shape(), b->shape())) {
return false;
}
switch (a->opcode()) {
case HloOpcode::kTranspose:
return a->dimensions() == b->dimensions();
case HloOpcode::kReshape:
return ShapeUtil::SameDimensions(a->operand(0)->shape(),
b->operand(0)->shape());
default:
return false;
}
}
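// Re-indexes broadcast dimensions through the inverse of the permutation
// `transpose_dims`.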
absl::InlinedVector<int64_t, 4> TransposedBcastDims(
absl::Span<const int64_t> bcast_dims,
absl::Span<const int64_t> transpose_dims) {
auto inv_perm = InversePermutation(transpose_dims);
absl::InlinedVector<int64_t, 4> new_bcast_dims;
for (int64_t dim : bcast_dims) {
new_bcast_dims.push_back(inv_perm[dim]);
}
return new_bcast_dims;
}
}
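// Returns true when the inverse of `rearrange` can be absorbed into `instr`
// without materializing a new reshape/transpose: identity rearranges,
// constants, single-use rng ops, and broadcasts that remain expressible as
// broadcasts after the rearrangement.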
bool ReshapeMover::CanTriviallyRearrange(const HloInstruction* instr,
const HloInstruction* rearrange) {
CHECK(IsRearrange(rearrange)) << rearrange->ToString();
if (rearrange->opcode() == HloOpcode::kReshape &&
ShapeUtil::Equal(rearrange->shape(), rearrange->operand(0)->shape())) {
return true;
}
if (rearrange->opcode() == HloOpcode::kTranspose &&
IsIdentityPermutation(rearrange->dimensions())) {
return true;
}
if (instr->opcode() == HloOpcode::kConstant) {
return true;
}
if (instr->opcode() == HloOpcode::kRng && instr->user_count() == 1) {
return true;
}
if (instr->opcode() == HloOpcode::kBroadcast) {
if (!absl::c_is_sorted(instr->dimensions())) {
return false;
}
if (rearrange->opcode() == HloOpcode::kReshape) {
return ShapeUtil::IsScalar(instr->operand(0)->shape()) ||
(options_.reshape_of_1d_broadcast_is_cheap &&
ShapeUtil::TrueRank(instr->operand(0)->shape()) <= 1) ||
(options_.reshape_of_1d_broadcast_is_cheap &&
ShapeUtil::ReshapeLeavesDimensionsUnmodified(
rearrange->shape(),
rearrange->operand(0)->shape(),
instr->dimensions())
.has_value());
}
if (rearrange->opcode() == HloOpcode::kTranspose) {
return absl::c_is_sorted(TransposedBcastDims(
instr->dimensions(), InversePermutation(rearrange->dimensions())));
}
}
return false;
}
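// Returns the first operand that is a reshape/transpose whose own operand
// cannot trivially absorb it, or nullptr if there is no such operand.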
const HloInstruction* ReshapeMover::FirstNontrivialRearrange(
absl::Span<const HloInstruction* const> instrs) {
auto rearrange_it = absl::c_find_if(instrs, [&](const HloInstruction* instr) {
return IsRearrange(instr) &&
!CanTriviallyRearrange(instr->operand(0), instr);
});
if (rearrange_it == instrs.end()) {
return nullptr;
}
return *rearrange_it;
}
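// An elementwise instruction is a candidate when it has at least one
// nontrivial rearrange operand and every operand either performs an
// equivalent rearrange or can trivially absorb the inverse rearrange.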
bool ReshapeMover::IsReshapeMoveCandidate(HloInstruction* instruction) {
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
VLOG(5) << "** Checking instruction: "
<< instruction->ToString(print_no_metadata);
if (!instruction->IsElementwise()) {
return false;
}
const HloInstruction* rearrange =
FirstNontrivialRearrange(instruction->operands());
if (rearrange == nullptr) {
return false;
}
return absl::c_all_of(
instruction->operands(), [&](const HloInstruction* operand) {
return (IsRearrange(operand) &&
AreEquivalentRearranges(operand, rearrange)) ||
(!IsRearrange(operand) &&
CanTriviallyRearrange(operand, rearrange));
});
}
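// Produces `operand` rearranged by the inverse of `rearrange`; returns the
// operand unchanged when the inverse would be a no-op.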
absl::StatusOr<HloInstruction*> ReshapeMover::ApplyInverseRearrange(
const HloInstruction* rearrange, HloInstruction* operand) {
switch (rearrange->opcode()) {
case HloOpcode::kReshape: {
Shape new_shape = ShapeUtil::ChangeElementType(
rearrange->operand(0)->shape(), operand->shape().element_type());
if (operand->shape() != new_shape) {
return MakeReshapeHlo(new_shape, operand);
} else {
return operand;
}
}
case HloOpcode::kTranspose: {
if (!IsIdentityPermutation(rearrange->dimensions())) {
return MakeTransposeHlo(operand,
InversePermutation(rearrange->dimensions()));
} else {
return operand;
}
}
default:
LOG(FATAL) << "Invalid rearrange op: " << rearrange->ToString();
}
}
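// Rewrites `instruction` to operate on the pre-rearranged operands and then
// re-applies the rearrange (reshape or transpose) to its result.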
absl::StatusOr<bool> ReshapeMover::SinkRearrangeOperands(
HloInstruction* instruction) {
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
HloComputation* computation = instruction->parent();
const HloInstruction* rearrange =
FirstNontrivialRearrange(instruction->operands());
CHECK(rearrange != nullptr);
const Shape& new_operand_shape = rearrange->operand(0)->shape();
VLOG(3) << "** Sinking reshape or transpose: "
<< instruction->ToString(print_no_metadata)
<< "\n\tfirst rearrange operand: "
<< rearrange->ToString(print_no_metadata)
<< "\n\tnew operand shape: "
<< ShapeUtil::HumanString(new_operand_shape);
auto operands = instruction->operands();
for (size_t i = 0; i < operands.size(); ++i) {
VLOG(3) << "Updating operand #" << i << ": "
<< operands[i]->ToString(print_no_metadata);
TF_ASSIGN_OR_RETURN(operands[i],
ApplyInverseRearrange(rearrange, operands[i]));
VLOG(3) << "Updated operand #" << i
<< " to: " << operands[i]->ToString(print_no_metadata);
}
HloInstruction* new_elementwise =
computation->AddInstruction(instruction->CloneWithNewOperands(
ShapeUtil::ChangeElementType(new_operand_shape,
instruction->shape().element_type()),
operands));
std::unique_ptr<HloInstruction> new_rearrange;
switch (rearrange->opcode()) {
case HloOpcode::kReshape:
VLOG(3) << "Creating new reshape for new elementwise op: "
<< new_elementwise->ToString(print_no_metadata);
new_rearrange =
HloInstruction::CreateReshape(instruction->shape(), new_elementwise);
break;
case HloOpcode::kTranspose:
new_rearrange = HloInstruction::CreateTranspose(
instruction->shape(), new_elementwise, rearrange->dimensions());
break;
default:
LOG(FATAL) << "Bad opcode";
}
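  // The original sharding is defined on the post-rearrange shape and may be
  // invalid for the new elementwise op's shape, so it is not carried over.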
if (instruction->has_sharding()) {
new_elementwise->clear_sharding();
}
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
instruction, std::move(new_rearrange)));
return true;
}
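// Iteratively drops candidates whose rearrange operands are also used outside
// the candidate set (sinking would not eliminate those rearranges), then sinks
// the rearranges of the surviving candidates.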
absl::StatusOr<bool> ReshapeMover::TryReshapeMoveOnCandidates(
HloInstructionSet* candidates) {
bool removed = true;
while (!candidates->empty() && removed) {
if (VLOG_IS_ON(5)) {
for (const HloInstruction* instruction : *candidates) {
VLOG(5) << "candidate " << instruction->ToString();
}
}
ConstHloInstructionSet rearrange_operands;
for (const HloInstruction* instruction : *candidates) {
for (const auto* operand : instruction->operands()) {
if (IsRearrange(operand)) {
rearrange_operands.insert(operand);
}
}
}
removed = false;
for (auto operand : rearrange_operands) {
if (absl::c_any_of(operand->users(), [&](HloInstruction* user) {
return !candidates->count(user);
})) {
for (auto* user : operand->users()) {
removed |= candidates->erase(user) > 0;
}
}
}
}
if (candidates->empty()) {
return false;
}
for (HloInstruction* instruction : *candidates) {
if (!ConsumeFuel("reshape-mover", [&] {
return absl::StrCat("instruction: ", instruction->ToString(),
"\nFull module:\n",
instruction->GetModule()->ToString());
})) {
break;
}
TF_ASSIGN_OR_RETURN(bool did_change, SinkRearrangeOperands(instruction));
CHECK(did_change);
}
return true;
}
absl::StatusOr<bool> ReshapeMover::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
HloInstructionSet candidates;
for (HloInstruction* instruction : comp->instructions()) {
if (IsReshapeMoveCandidate(instruction)) {
candidates.insert(instruction);
}
}
TF_ASSIGN_OR_RETURN(bool did_change,
TryReshapeMoveOnCandidates(&candidates));
changed |= did_change;
}
return changed;
}
} | #include "xla/service/reshape_mover.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = xla::match;
class ReshapeMoverTest : public HloTestBase {
protected:
absl::Status RunPass(HloModule* module, bool change_expected,
ReshapeMoverOptions options = ReshapeMoverOptions{}) {
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(ReshapeMover(options), module));
SCOPED_TRACE(module->ToString());
EXPECT_EQ(changed, change_expected);
TF_EXPECT_OK(RunHloPass(HloVerifier(HloVerifierOpts()), module).status());
TF_EXPECT_OK(RunHloPass(HloPassFix<AlgebraicSimplifier>(
AlgebraicSimplifierOptions()),
module)
.status());
return absl::OkStatus();
}
};
TEST_F(ReshapeMoverTest, ReshapesWithDifferentInputShapesNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
reshape1 = f32[8,7] reshape(f32[1,8,7,1] parameter(1))
ROOT add = add(reshape0, reshape1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, OneConstantAndOneReshapesOnRngNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
rng = f32[1,8,1,7,1] rng(f32[] constant(0), f32[] constant(1)), distribution=rng_uniform
ROOT add = add(f32[8,7] reshape(rng), f32[8,7] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, EquivalentReshapesMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
reshape1 = f32[8,7] reshape(f32[1,8,1,7] parameter(1))
ROOT add = f32[8,7] add(reshape0, reshape1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, SinkReshapeBelowSelect) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = f32[2,3] select(
pred[2,3] reshape(pred[6] parameter(0)),
f32[2,3] reshape(f32[6] parameter(1)),
f32[2,3] reshape(f32[6] parameter(2)))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2)))));
}
TEST_F(ReshapeMoverTest, SinkReshapeBelowSelectWithConstant) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = f32[2,3] select(
pred[2,3] reshape(pred[6] parameter(0)),
f32[2,3] reshape(f32[6] parameter(1)),
f32[2,3] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),
m::Reshape(m::Constant())))));
}
TEST_F(ReshapeMoverTest, OneParameterAndOneReshapeNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
ROOT add = add(reshape0, f32[8,7] parameter(1))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, DontSinkReshapesOfConstants) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = select(
pred[3,2] parameter(0),
f32[3,2] reshape(f32[2,3] constant({...})),
f32[3,2] reshape(f32[2,3] constant({...})))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, OneNontrivialReshapeMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT add = add(
f32[3,2] reshape(f32[2,3] parameter(0)),
f32[3,2] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Parameter(0), m::Reshape(m::Constant())))));
}
TEST_F(ReshapeMoverTest, MultipleReshapes) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
add0 = f32[8,7,1] add(
f32[8,7,1] reshape(f32[1,8,1,7] parameter(0)),
f32[8,7,1] reshape(f32[1,8,1,7] parameter(1)))
ROOT add1 = f32[8,7] add(
f32[8,7] reshape(add0),
f32[8,7] reshape(f32[8,7,1] parameter(2)))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1))),
m::Parameter(2)))));
}
TEST_F(ReshapeMoverTest, SinkTransposeAcrossBroadcastScalar) {
const std::string hlo_string = R"(
HloModule TransposeMulInversedTransposeModule
ENTRY TransposeMulInversedTranspose {
src0 = f32[20,8]{1,0} parameter(0)
transpose0 = f32[8,20]{1,0} transpose(src0), dimensions={1,0}
src1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(src1), dimensions={}
ROOT multiply0 = f32[8,20]{1,0} multiply(transpose0, broadcast0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(m::Multiply(
m::Parameter(0), m::Broadcast(m::Parameter(1))))));
}
TEST_F(ReshapeMoverTest, ReshapeWithUsersOutsideCandidatesNotSink) {
const std::string hlo_string = R"(
HloModule ReshapeWithUsersOutsideCandidates
ENTRY ReshapeWithMultipleUsers {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
param1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}
param2 = f32[20,8]{1,0} parameter(2)
reshape1 = f32[8,20]{1,0} reshape(param2)
param3 = f32[20,8]{1,0} parameter(3)
reshape2 = f32[8,20]{1,0} reshape(param3)
param4 = f32[8,20]{1,0} parameter(4)
add0 = f32[8,20]{1,0} add(reshape0, broadcast0)
add1 = f32[8,20]{1,0} add(reshape0, reshape1)
add2 = f32[8,20]{1,0} add(reshape1, param4)
ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},
f32[8,20]{1,0}) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink1) {
const std::string hlo_string = R"(
HloModule ReshapeNoUsersOutsideCandidates1
ENTRY ReshapeWithMultipleUsers1 {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
param1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}
param2 = f32[20,8]{1,0} parameter(2)
reshape1 = f32[8,20]{1,0} reshape(param2)
param3 = f32[20,8]{1,0} parameter(3)
reshape2 = f32[8,20]{1,0} reshape(param3)
add0 = f32[8,20]{1,0} add(reshape0, broadcast0)
add1 = f32[8,20]{1,0} add(reshape0, reshape1)
add2 = f32[8,20]{1,0} add(reshape1, reshape2)
ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},
f32[8,20]{1,0}) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Reshape(m::Add(m::Parameter(0), m::Broadcast(m::Parameter(1)))),
m::Reshape(m::Add(m::Parameter(0), m::Parameter(2))),
m::Reshape(m::Add(m::Parameter(2), m::Parameter(3))))));
}
TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink2) {
const std::string hlo_string = R"(
HloModule ReshapeNoUsersOutsideCandidates2
ENTRY ReshapeWithMultipleUsers2 {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
ROOT add0 = f32[8,20]{1,0} add(reshape0, reshape0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Add())));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsNotTrivial) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] reshape(f32[6] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsTrivial) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] reshape(f32[6] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
ReshapeMoverOptions options;
options.reshape_of_1d_broadcast_is_cheap = true;
TF_ASSERT_OK(RunPass(m.get(), true, options));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Reshape(m::Broadcast(m::Parameter(0))), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank2BroadcastIsAllowed) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,35] reshape(f32[2,3,5,7] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
ReshapeMoverOptions options;
options.reshape_of_1d_broadcast_is_cheap = true;
TF_ASSERT_OK(RunPass(m.get(), true, options));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, SinkDisallowedIfReshapeChangesBroadcastDims) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,35] reshape(f32[6,5,7] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, TransposeOfBroadcastIsAllowed) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] transpose(f32[3,2] parameter(1)), dimensions={1,0}
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, TransposeReordersBroadcastDims) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,5] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,5] transpose(f32[3,2,5] parameter(1)), dimensions={1,0,2}
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ShardingConsistencyPreservation) {
const std::string hlo_string = R"(
HloModule module
ENTRY entry {
copy.2424 = bf16[3,16,128]{2,1,0} parameter(0), sharding={replicated}
dot.987 = bf16[3,16,128,4096]{3,2,1,0} parameter(1), sharding={devices=[1,8,1,1]0,1,2,3,4,5,6,7}
reshape.5843 = bf16[3,16,128,1,4096]{4,3,2,1,0} reshape(dot.987), sharding={devices=[1,8,1,1,1]0,1,2,3,4,5,6,7}
transpose.21172 = bf16[3,1,4096,16,128]{2,1,4,3,0} transpose(reshape.5843), dimensions={0,3,4,1,2}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
reshape.291 = bf16[3,16,128]{2,1,0} reshape(copy.2424), sharding={devices=[1,8,1]0,1,2,3,4,5,6,7}
broadcast.21176 = bf16[3,1,4096,16,128]{4,3,2,1,0} broadcast(reshape.291), dimensions={0,3,4}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
multiply.21177 = bf16[3,1,4096,16,128]{2,1,4,3,0} multiply(transpose.21172, broadcast.21176), sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
ROOT slice.21180 = bf16[1,1,4096,16,128]{4,3,2,1,0} slice(multiply.21177), slice={[1:2], [0:1], [0:4096], [0:16], [0:128]}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
auto elementwise_op = FindInstruction(m.get(), HloOpcode::kMultiply);
EXPECT_FALSE(elementwise_op->has_sharding());
}
}
} |